[ "pass def fit(self): pass def transform(self): pass def fit_transform(self, X): X = aligned(X)", "IndicatorPipeLine: def __init__(self): pass def fit(self): pass def transform(self): pass def fit_transform(self, X):", "digest collectively, there is no group mind to think collectively. |||||||||||| # #", "transform(self): pass def fit_transform(self, X): X = aligned(X) X = sup_res_align(X) # X", "process_dates(X) return X def aligned(df): for column in df: if 'direction' in column:", "if 'direction' in column: df[column] = df[column] == df['signal'] for column in df:", "df['signal'] for column in df: if 'tied' in column: df[column] = df.apply(lambda x:", "# # |||| The merit of a design is the only credential that", "think collectively. |||||||||||| # # |||| Each man must accept responsibility for his", "\"\"\" class IndicatorPipeLine: def __init__(self): pass def fit(self): pass def transform(self): pass def", "for column in df: if 'direction' in column: df[column] = df[column] == df['signal']", "if x['signal'] else -x[column], axis=1) return df def sup_res_align(df): for column in df:", "by his own judgment. ||||||||||||| # # |||| If a man believes a", "Each man must accept responsibility for his own life, each must be sovereign", "df def sup_res_align(df): for column in df: if 'sup' in column: df[column +", "df['signal']) & (df['signal'])) if 'res' in column: df[column + '_aligned'] = ((df[column] ==", "his own life, each must be sovereign by his own judgment. ||||||||||||| #", "Machine Learning...' on 29/01/2018. \"\"\" class IndicatorPipeLine: def __init__(self): pass def fit(self): pass", "= df.index pre = 'dt_' attr_list = ['Year', 'Month', 'Week', 'Day', 'Dayofweek', 'Hour',", "accept responsibility for his own life, each must be sovereign by his own", "column: df[column] = df.apply(lambda x: x[column] if x['signal'] else -x[column], axis=1) return df", "== df['signal'] for column in df: if 'tied' in column: df[column] = df.apply(lambda", "credential that you require. |||||||||||||||||||||||||||||||||||||||||||||||| # # |||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| # ######################################################################################################################## \"\"\" .. moduleauthor::", "def aligned(df): for column in df: if 'direction' in column: df[column] = df[column]", "stomach to digest collectively, there is no group mind to think collectively. ||||||||||||", "'direction' in column: df[column] = df[column] == df['signal'] for column in df: if", "= 'dt_' attr_list = ['Year', 'Month', 'Week', 'Day', 'Dayofweek', 'Hour', 'Minute'] for attr", "# |||| The merit of a design is the only credential that you", "The merit of a design is the only credential that you require. ||||||||||||||||||||||||||||||||||||||||||||||||", "column: df[column] = df[column] == df['signal'] for column in df: if 'tied' in", "be willing to break all established conventions to accomplish it. |||||||||||| # #", "moduleauthor:: <NAME> Studying Pipelines through 'Hands-On Machine Learning...' on 29/01/2018. 
\"\"\" class IndicatorPipeLine:", "X = sup_res_align(X) # X = process_dates(X) return X def aligned(df): for column", "if 'tied' in column: df[column] = df.apply(lambda x: x[column] if x['signal'] else -x[column],", "pre = 'dt_' attr_list = ['Year', 'Month', 'Week', 'Day', 'Dayofweek', 'Hour', 'Minute'] for", "a thinker means to go by the factual evidence of a case, not", "# |||| If a man believes a claim to be true, then he", "If a man believes a claim to be true, then he must hold", "'Hands-On Machine Learning...' on 29/01/2018. \"\"\" class IndicatorPipeLine: def __init__(self): pass def fit(self):", "all established conventions to accomplish it. |||||||||||| # # |||| The merit of", "df def process_dates(df): fld = df.index pre = 'dt_' attr_list = ['Year', 'Month',", "be a thinker means to go by the factual evidence of a case,", "own judgment. ||||||||||||| # # |||| If a man believes a claim to", "by the judgment of others |||||||||||||||||| # # |||| As there is no", "even though society opposes him. ||||||| # # |||| Not only know what", "society opposes him. ||||||| # # |||| Not only know what you want,", "'Day', 'Dayofweek', 'Hour', 'Minute'] for attr in attr_list: df[pre + attr] = getattr(fld.dt,", "x['signal'] else -x[column], axis=1) return df def sup_res_align(df): for column in df: if", "axis=1) return df def sup_res_align(df): for column in df: if 'sup' in column:", "group stomach to digest collectively, there is no group mind to think collectively.", "((df[column] == df['signal']) & (df['signal'])) if 'res' in column: df[column + '_aligned'] =", "group mind to think collectively. |||||||||||| # # |||| Each man must accept", "be sovereign by his own judgment. ||||||||||||| # # |||| If a man", "is the only credential that you require. |||||||||||||||||||||||||||||||||||||||||||||||| # # |||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| # ########################################################################################################################", "|||| To be a thinker means to go by the factual evidence of", "for column in df: if 'sup' in column: df[column + '_aligned'] = ((df[column]", "(df['signal'])) if 'res' in column: df[column + '_aligned'] = ((df[column] == ~df['signal']) &", "in df: if 'direction' in column: df[column] = df[column] == df['signal'] for column", "judgment of others |||||||||||||||||| # # |||| As there is no group stomach", "~df['signal']) & (~df['signal'])) return df def process_dates(df): fld = df.index pre = 'dt_'", "of others |||||||||||||||||| # # |||| As there is no group stomach to", "& (~df['signal'])) return df def process_dates(df): fld = df.index pre = 'dt_' attr_list", "established conventions to accomplish it. |||||||||||| # # |||| The merit of a", "((df[column] == ~df['signal']) & (~df['signal'])) return df def process_dates(df): fld = df.index pre", "<filename>aquitania/data_processing/indicator_pipeline.py ######################################################################################################################## # |||||||||||||||||||||||||||||||||||||||||||||||||| AQUITANIA ||||||||||||||||||||||||||||||||||||||||||||||||||||||| # # |||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| # # |||| To", "there is no group stomach to digest collectively, there is no group mind", "only credential that you require. 
|||||||||||||||||||||||||||||||||||||||||||||||| # # |||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| # ######################################################################################################################## \"\"\" ..", "|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| # ######################################################################################################################## \"\"\" .. moduleauthor:: <NAME> Studying Pipelines through 'Hands-On Machine Learning...'", "df: if 'direction' in column: df[column] = df[column] == df['signal'] for column in", "(~df['signal'])) return df def process_dates(df): fld = df.index pre = 'dt_' attr_list =", "'Month', 'Week', 'Day', 'Dayofweek', 'Hour', 'Minute'] for attr in attr_list: df[pre + attr]", "only know what you want, but be willing to break all established conventions", "x[column] if x['signal'] else -x[column], axis=1) return df def sup_res_align(df): for column in", "+ '_aligned'] = ((df[column] == ~df['signal']) & (~df['signal'])) return df def process_dates(df): fld", "'Week', 'Day', 'Dayofweek', 'Hour', 'Minute'] for attr in attr_list: df[pre + attr] =", "fit(self): pass def transform(self): pass def fit_transform(self, X): X = aligned(X) X =", "pass def fit_transform(self, X): X = aligned(X) X = sup_res_align(X) # X =", "'dt_' attr_list = ['Year', 'Month', 'Week', 'Day', 'Dayofweek', 'Hour', 'Minute'] for attr in", "|||||||||||| # # |||| Each man must accept responsibility for his own life,", "x: x[column] if x['signal'] else -x[column], axis=1) return df def sup_res_align(df): for column", "his own judgment. ||||||||||||| # # |||| If a man believes a claim", "must hold to this belief even though society opposes him. ||||||| # #", "then he must hold to this belief even though society opposes him. |||||||", "# ######################################################################################################################## \"\"\" .. moduleauthor:: <NAME> Studying Pipelines through 'Hands-On Machine Learning...' on", "|||| If a man believes a claim to be true, then he must", "but be willing to break all established conventions to accomplish it. |||||||||||| #", "thinker means to go by the factual evidence of a case, not by", "['Year', 'Month', 'Week', 'Day', 'Dayofweek', 'Hour', 'Minute'] for attr in attr_list: df[pre +", "others |||||||||||||||||| # # |||| As there is no group stomach to digest", "df[column + '_aligned'] = ((df[column] == ~df['signal']) & (~df['signal'])) return df def process_dates(df):", "# # |||| To be a thinker means to go by the factual", "df[column] = df[column] == df['signal'] for column in df: if 'tied' in column:", "__init__(self): pass def fit(self): pass def transform(self): pass def fit_transform(self, X): X =", "life, each must be sovereign by his own judgment. ||||||||||||| # # ||||", "# |||| As there is no group stomach to digest collectively, there is", "= df[column] == df['signal'] for column in df: if 'tied' in column: df[column]", "design is the only credential that you require. 
|||||||||||||||||||||||||||||||||||||||||||||||| # # |||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| #", "def fit(self): pass def transform(self): pass def fit_transform(self, X): X = aligned(X) X", "# |||| To be a thinker means to go by the factual evidence", "|||| As there is no group stomach to digest collectively, there is no", "claim to be true, then he must hold to this belief even though", "# # |||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| # ######################################################################################################################## \"\"\" .. moduleauthor:: <NAME> Studying Pipelines through 'Hands-On", "aligned(df): for column in df: if 'direction' in column: df[column] = df[column] ==", "means to go by the factual evidence of a case, not by the", "he must hold to this belief even though society opposes him. ||||||| #", "return df def process_dates(df): fld = df.index pre = 'dt_' attr_list = ['Year',", "believes a claim to be true, then he must hold to this belief", "a man believes a claim to be true, then he must hold to", "else -x[column], axis=1) return df def sup_res_align(df): for column in df: if 'sup'", "case, not by the judgment of others |||||||||||||||||| # # |||| As there", "to break all established conventions to accomplish it. |||||||||||| # # |||| The", "in df: if 'tied' in column: df[column] = df.apply(lambda x: x[column] if x['signal']", "'tied' in column: df[column] = df.apply(lambda x: x[column] if x['signal'] else -x[column], axis=1)", "know what you want, but be willing to break all established conventions to", "the only credential that you require. |||||||||||||||||||||||||||||||||||||||||||||||| # # |||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| # ######################################################################################################################## \"\"\"", "sup_res_align(X) # X = process_dates(X) return X def aligned(df): for column in df:", "a case, not by the judgment of others |||||||||||||||||| # # |||| As", "######################################################################################################################## # |||||||||||||||||||||||||||||||||||||||||||||||||| AQUITANIA ||||||||||||||||||||||||||||||||||||||||||||||||||||||| # # |||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| # # |||| To be", "|||| Each man must accept responsibility for his own life, each must be", "own life, each must be sovereign by his own judgment. ||||||||||||| # #", "To be a thinker means to go by the factual evidence of a", "merit of a design is the only credential that you require. |||||||||||||||||||||||||||||||||||||||||||||||| #", "def process_dates(df): fld = df.index pre = 'dt_' attr_list = ['Year', 'Month', 'Week',", "require. |||||||||||||||||||||||||||||||||||||||||||||||| # # |||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| # ######################################################################################################################## \"\"\" .. 
moduleauthor:: <NAME> Studying Pipelines", "|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| # # |||| To be a thinker means to go by the", "the judgment of others |||||||||||||||||| # # |||| As there is no group", "||||||||||||||||||||||||||||||||||||||||||||||||||||||| # # |||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| # # |||| To be a thinker means to", ".. moduleauthor:: <NAME> Studying Pipelines through 'Hands-On Machine Learning...' on 29/01/2018. \"\"\" class", "+ '_aligned'] = ((df[column] == df['signal']) & (df['signal'])) if 'res' in column: df[column", "'res' in column: df[column + '_aligned'] = ((df[column] == ~df['signal']) & (~df['signal'])) return", "column: df[column + '_aligned'] = ((df[column] == ~df['signal']) & (~df['signal'])) return df def", "|||||||||||| # # |||| The merit of a design is the only credential", "to digest collectively, there is no group mind to think collectively. |||||||||||| #", "|||||||||||||||||| # # |||| As there is no group stomach to digest collectively,", "factual evidence of a case, not by the judgment of others |||||||||||||||||| #", "must accept responsibility for his own life, each must be sovereign by his", "||||||| # # |||| Not only know what you want, but be willing", "|||| The merit of a design is the only credential that you require.", "\"\"\" .. moduleauthor:: <NAME> Studying Pipelines through 'Hands-On Machine Learning...' on 29/01/2018. \"\"\"", "if 'res' in column: df[column + '_aligned'] = ((df[column] == ~df['signal']) & (~df['signal']))", "df.index pre = 'dt_' attr_list = ['Year', 'Month', 'Week', 'Day', 'Dayofweek', 'Hour', 'Minute']", "to be true, then he must hold to this belief even though society", "break all established conventions to accomplish it. |||||||||||| # # |||| The merit", "pass def transform(self): pass def fit_transform(self, X): X = aligned(X) X = sup_res_align(X)", "collectively, there is no group mind to think collectively. |||||||||||| # # ||||", "= process_dates(X) return X def aligned(df): for column in df: if 'direction' in", "def fit_transform(self, X): X = aligned(X) X = sup_res_align(X) # X = process_dates(X)", "though society opposes him. ||||||| # # |||| Not only know what you", "fit_transform(self, X): X = aligned(X) X = sup_res_align(X) # X = process_dates(X) return", "be true, then he must hold to this belief even though society opposes", "on 29/01/2018. 
\"\"\" class IndicatorPipeLine: def __init__(self): pass def fit(self): pass def transform(self):", "class IndicatorPipeLine: def __init__(self): pass def fit(self): pass def transform(self): pass def fit_transform(self,", "attr_list = ['Year', 'Month', 'Week', 'Day', 'Dayofweek', 'Hour', 'Minute'] for attr in attr_list:", "|||||||||||||||||||||||||||||||||||||||||||||||||| AQUITANIA ||||||||||||||||||||||||||||||||||||||||||||||||||||||| # # |||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| # # |||| To be a thinker", "= df.apply(lambda x: x[column] if x['signal'] else -x[column], axis=1) return df def sup_res_align(df):", "in df: if 'sup' in column: df[column + '_aligned'] = ((df[column] == df['signal'])", "true, then he must hold to this belief even though society opposes him.", "== ~df['signal']) & (~df['signal'])) return df def process_dates(df): fld = df.index pre =", "||||||||||||| # # |||| If a man believes a claim to be true,", "for column in df: if 'tied' in column: df[column] = df.apply(lambda x: x[column]", "willing to break all established conventions to accomplish it. |||||||||||| # # ||||", "X): X = aligned(X) X = sup_res_align(X) # X = process_dates(X) return X", "no group mind to think collectively. |||||||||||| # # |||| Each man must", "= sup_res_align(X) # X = process_dates(X) return X def aligned(df): for column in", "by the factual evidence of a case, not by the judgment of others", "what you want, but be willing to break all established conventions to accomplish", "if 'sup' in column: df[column + '_aligned'] = ((df[column] == df['signal']) & (df['signal']))", "= ((df[column] == df['signal']) & (df['signal'])) if 'res' in column: df[column + '_aligned']", "= aligned(X) X = sup_res_align(X) # X = process_dates(X) return X def aligned(df):", "through 'Hands-On Machine Learning...' on 29/01/2018. \"\"\" class IndicatorPipeLine: def __init__(self): pass def", "= ((df[column] == ~df['signal']) & (~df['signal'])) return df def process_dates(df): fld = df.index", "responsibility for his own life, each must be sovereign by his own judgment.", "return X def aligned(df): for column in df: if 'direction' in column: df[column]", "& (df['signal'])) if 'res' in column: df[column + '_aligned'] = ((df[column] == ~df['signal'])", "|||| Not only know what you want, but be willing to break all", "column in df: if 'sup' in column: df[column + '_aligned'] = ((df[column] ==", "'Minute'] for attr in attr_list: df[pre + attr] = getattr(fld.dt, attr.lower()) return df", "column: df[column + '_aligned'] = ((df[column] == df['signal']) & (df['signal'])) if 'res' in", "to think collectively. |||||||||||| # # |||| Each man must accept responsibility for", "of a design is the only credential that you require. |||||||||||||||||||||||||||||||||||||||||||||||| # #", "to accomplish it. |||||||||||| # # |||| The merit of a design is", "Learning...' on 29/01/2018. \"\"\" class IndicatorPipeLine: def __init__(self): pass def fit(self): pass def", "is no group mind to think collectively. 
|||||||||||| # # |||| Each man", "df.apply(lambda x: x[column] if x['signal'] else -x[column], axis=1) return df def sup_res_align(df): for", "the factual evidence of a case, not by the judgment of others ||||||||||||||||||", "in column: df[column] = df[column] == df['signal'] for column in df: if 'tied'", "df[column + '_aligned'] = ((df[column] == df['signal']) & (df['signal'])) if 'res' in column:", "evidence of a case, not by the judgment of others |||||||||||||||||| # #", "aligned(X) X = sup_res_align(X) # X = process_dates(X) return X def aligned(df): for", "df[column] = df.apply(lambda x: x[column] if x['signal'] else -x[column], axis=1) return df def", "in column: df[column + '_aligned'] = ((df[column] == df['signal']) & (df['signal'])) if 'res'", "belief even though society opposes him. ||||||| # # |||| Not only know", "# |||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| # ######################################################################################################################## \"\"\" .. moduleauthor:: <NAME> Studying Pipelines through 'Hands-On Machine", "def __init__(self): pass def fit(self): pass def transform(self): pass def fit_transform(self, X): X", "'Hour', 'Minute'] for attr in attr_list: df[pre + attr] = getattr(fld.dt, attr.lower()) return", "there is no group mind to think collectively. |||||||||||| # # |||| Each", "sup_res_align(df): for column in df: if 'sup' in column: df[column + '_aligned'] =", "is no group stomach to digest collectively, there is no group mind to", "df: if 'tied' in column: df[column] = df.apply(lambda x: x[column] if x['signal'] else", "df[column] == df['signal'] for column in df: if 'tied' in column: df[column] =", "must be sovereign by his own judgment. ||||||||||||| # # |||| If a", "return df def sup_res_align(df): for column in df: if 'sup' in column: df[column", "X def aligned(df): for column in df: if 'direction' in column: df[column] =", "As there is no group stomach to digest collectively, there is no group", "# |||| Each man must accept responsibility for his own life, each must", "a claim to be true, then he must hold to this belief even", "that you require. |||||||||||||||||||||||||||||||||||||||||||||||| # # |||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| # ######################################################################################################################## \"\"\" .. moduleauthor:: <NAME>", "|||||||||||||||||||||||||||||||||||||||||||||||| # # |||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| # ######################################################################################################################## \"\"\" .. moduleauthor:: <NAME> Studying Pipelines through", "AQUITANIA ||||||||||||||||||||||||||||||||||||||||||||||||||||||| # # |||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| # # |||| To be a thinker means", "'_aligned'] = ((df[column] == df['signal']) & (df['signal'])) if 'res' in column: df[column +", "df: if 'sup' in column: df[column + '_aligned'] = ((df[column] == df['signal']) &", "collectively. 
|||||||||||| # # |||| Each man must accept responsibility for his own", "X = aligned(X) X = sup_res_align(X) # X = process_dates(X) return X def", "'_aligned'] = ((df[column] == ~df['signal']) & (~df['signal'])) return df def process_dates(df): fld =", "'sup' in column: df[column + '_aligned'] = ((df[column] == df['signal']) & (df['signal'])) if", "not by the judgment of others |||||||||||||||||| # # |||| As there is", "to this belief even though society opposes him. ||||||| # # |||| Not", "Not only know what you want, but be willing to break all established", "'Dayofweek', 'Hour', 'Minute'] for attr in attr_list: df[pre + attr] = getattr(fld.dt, attr.lower())", "######################################################################################################################## \"\"\" .. moduleauthor:: <NAME> Studying Pipelines through 'Hands-On Machine Learning...' on 29/01/2018.", "def transform(self): pass def fit_transform(self, X): X = aligned(X) X = sup_res_align(X) #", "# |||||||||||||||||||||||||||||||||||||||||||||||||| AQUITANIA ||||||||||||||||||||||||||||||||||||||||||||||||||||||| # # |||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| # # |||| To be a", "X = process_dates(X) return X def aligned(df): for column in df: if 'direction'", "Pipelines through 'Hands-On Machine Learning...' on 29/01/2018. \"\"\" class IndicatorPipeLine: def __init__(self): pass", "conventions to accomplish it. |||||||||||| # # |||| The merit of a design", "column in df: if 'tied' in column: df[column] = df.apply(lambda x: x[column] if", "a design is the only credential that you require. |||||||||||||||||||||||||||||||||||||||||||||||| # # ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||", "in column: df[column + '_aligned'] = ((df[column] == ~df['signal']) & (~df['signal'])) return df", "man must accept responsibility for his own life, each must be sovereign by", "29/01/2018. \"\"\" class IndicatorPipeLine: def __init__(self): pass def fit(self): pass def transform(self): pass", "no group stomach to digest collectively, there is no group mind to think", "-x[column], axis=1) return df def sup_res_align(df): for column in df: if 'sup' in", "accomplish it. |||||||||||| # # |||| The merit of a design is the", "# X = process_dates(X) return X def aligned(df): for column in df: if", "of a case, not by the judgment of others |||||||||||||||||| # # ||||", "you want, but be willing to break all established conventions to accomplish it.", "mind to think collectively. |||||||||||| # # |||| Each man must accept responsibility", "it. |||||||||||| # # |||| The merit of a design is the only", "you require. |||||||||||||||||||||||||||||||||||||||||||||||| # # |||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| # ######################################################################################################################## \"\"\" .. moduleauthor:: <NAME> Studying", "to go by the factual evidence of a case, not by the judgment", "want, but be willing to break all established conventions to accomplish it. ||||||||||||", "sovereign by his own judgment. 
||||||||||||| # # |||| If a man believes", "# # |||| Each man must accept responsibility for his own life, each", "column in df: if 'direction' in column: df[column] = df[column] == df['signal'] for", "go by the factual evidence of a case, not by the judgment of", "def sup_res_align(df): for column in df: if 'sup' in column: df[column + '_aligned']", "# # |||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| # # |||| To be a thinker means to go", "opposes him. ||||||| # # |||| Not only know what you want, but", "Studying Pipelines through 'Hands-On Machine Learning...' on 29/01/2018. \"\"\" class IndicatorPipeLine: def __init__(self):", "judgment. ||||||||||||| # # |||| If a man believes a claim to be", "== df['signal']) & (df['signal'])) if 'res' in column: df[column + '_aligned'] = ((df[column]", "this belief even though society opposes him. ||||||| # # |||| Not only", "for his own life, each must be sovereign by his own judgment. |||||||||||||", "# # |||| If a man believes a claim to be true, then", "# |||| Not only know what you want, but be willing to break", "process_dates(df): fld = df.index pre = 'dt_' attr_list = ['Year', 'Month', 'Week', 'Day',", "fld = df.index pre = 'dt_' attr_list = ['Year', 'Month', 'Week', 'Day', 'Dayofweek',", "in column: df[column] = df.apply(lambda x: x[column] if x['signal'] else -x[column], axis=1) return", "# # |||| Not only know what you want, but be willing to", "man believes a claim to be true, then he must hold to this", "= ['Year', 'Month', 'Week', 'Day', 'Dayofweek', 'Hour', 'Minute'] for attr in attr_list: df[pre", "# |||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| # # |||| To be a thinker means to go by", "each must be sovereign by his own judgment. ||||||||||||| # # |||| If", "<NAME> Studying Pipelines through 'Hands-On Machine Learning...' on 29/01/2018. \"\"\" class IndicatorPipeLine: def", "# # |||| As there is no group stomach to digest collectively, there", "him. ||||||| # # |||| Not only know what you want, but be", "hold to this belief even though society opposes him. ||||||| # # ||||" ]
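A minimal usage sketch for the pipeline above. The DataFrame, its column names ('signal', 'ma_direction', 'rsi_tied', 'pivot_sup', 'pivot_res'), and its values are illustrative assumptions, not part of the original module; they only exercise the name-matching rules in aligned() and sup_res_align().

import pandas as pd

# Illustrative input: 'signal' is the boolean trade direction, the other
# columns stand in for indicator outputs that the helpers match by name.
df = pd.DataFrame({
    'signal':       [True, False, True],
    'ma_direction': [True, True, False],
    'rsi_tied':     [1.5, 2.0, -0.5],
    'pivot_sup':    [True, False, True],
    'pivot_res':    [False, False, True],
})

pipe = IndicatorPipeLine()
out = pipe.fit_transform(df)
print(out.filter(like='_aligned'))  # the new pivot_sup_aligned / pivot_res_aligned columns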
[ "'r') as yaml_file: data = yaml.load(yaml_file, Loader=yaml.FullLoader) return data except FileNotFoundError: raise FileNotFoundError('could", "= yaml.load(yaml_file, Loader=yaml.FullLoader) return data except FileNotFoundError: raise FileNotFoundError('could not load yaml at", "parse_url_path(url_path): reformat = url_path.replace('%2F', '/') reformat = reformat.replace('+', ' ') return reformat def", "Args: stringLength (int, optional): length of string. Defaults to 8. Returns: str: random", "= reformat.replace('+', ' ') return reformat def random_string(stringLength=8): \"\"\"https://pynative.com/python-generate-random-string/ Args: stringLength (int, optional):", "return data except FileNotFoundError: raise FileNotFoundError('could not load yaml at path: {path}') except", "except FileNotFoundError: raise FileNotFoundError('could not load yaml at path: {path}') except Exception as", "\"\"\"https://pynative.com/python-generate-random-string/ Args: stringLength (int, optional): length of string. Defaults to 8. Returns: str:", "k, v in body.items(): if len(v) == 1: body.update({k: v[0]}) return body def", "urllib.parse, random, string def load_yaml(path): import yaml try: with open(path, 'r') as yaml_file:", "body def unquote_plus(text): return urllib.parse.unquote_plus(text) def parse_url_path(url_path): reformat = url_path.replace('%2F', '/') reformat =", "url_path.replace('%2F', '/') reformat = reformat.replace('+', ' ') return reformat def random_string(stringLength=8): \"\"\"https://pynative.com/python-generate-random-string/ Args:", "Exception as e: raise e def parse_body(body): body = urllib.parse.parse_qs(body) for k, v", "Returns: str: random string \"\"\" letters = string.ascii_lowercase return ''.join(random.choice(letters) for i in", "yaml_file: data = yaml.load(yaml_file, Loader=yaml.FullLoader) return data except FileNotFoundError: raise FileNotFoundError('could not load", "in body.items(): if len(v) == 1: body.update({k: v[0]}) return body def unquote_plus(text): return", "body.update({k: v[0]}) return body def unquote_plus(text): return urllib.parse.unquote_plus(text) def parse_url_path(url_path): reformat = url_path.replace('%2F',", "parse_body(body): body = urllib.parse.parse_qs(body) for k, v in body.items(): if len(v) == 1:", "def random_string(stringLength=8): \"\"\"https://pynative.com/python-generate-random-string/ Args: stringLength (int, optional): length of string. Defaults to 8.", "optional): length of string. Defaults to 8. Returns: str: random string \"\"\" letters", "string def load_yaml(path): import yaml try: with open(path, 'r') as yaml_file: data =", "length of string. Defaults to 8. Returns: str: random string \"\"\" letters =", "as yaml_file: data = yaml.load(yaml_file, Loader=yaml.FullLoader) return data except FileNotFoundError: raise FileNotFoundError('could not", "except Exception as e: raise e def parse_body(body): body = urllib.parse.parse_qs(body) for k,", "string. Defaults to 8. Returns: str: random string \"\"\" letters = string.ascii_lowercase return", "body = urllib.parse.parse_qs(body) for k, v in body.items(): if len(v) == 1: body.update({k:", "to 8. 
Returns: str: random string \"\"\" letters = string.ascii_lowercase return ''.join(random.choice(letters) for", "'/') reformat = reformat.replace('+', ' ') return reformat def random_string(stringLength=8): \"\"\"https://pynative.com/python-generate-random-string/ Args: stringLength", "FileNotFoundError('could not load yaml at path: {path}') except Exception as e: raise e", "') return reformat def random_string(stringLength=8): \"\"\"https://pynative.com/python-generate-random-string/ Args: stringLength (int, optional): length of string.", "of string. Defaults to 8. Returns: str: random string \"\"\" letters = string.ascii_lowercase", "for k, v in body.items(): if len(v) == 1: body.update({k: v[0]}) return body", "random, string def load_yaml(path): import yaml try: with open(path, 'r') as yaml_file: data", "load_yaml(path): import yaml try: with open(path, 'r') as yaml_file: data = yaml.load(yaml_file, Loader=yaml.FullLoader)", "urllib.parse.unquote_plus(text) def parse_url_path(url_path): reformat = url_path.replace('%2F', '/') reformat = reformat.replace('+', ' ') return", "FileNotFoundError: raise FileNotFoundError('could not load yaml at path: {path}') except Exception as e:", "' ') return reformat def random_string(stringLength=8): \"\"\"https://pynative.com/python-generate-random-string/ Args: stringLength (int, optional): length of", "data = yaml.load(yaml_file, Loader=yaml.FullLoader) return data except FileNotFoundError: raise FileNotFoundError('could not load yaml", "def parse_url_path(url_path): reformat = url_path.replace('%2F', '/') reformat = reformat.replace('+', ' ') return reformat", "e def parse_body(body): body = urllib.parse.parse_qs(body) for k, v in body.items(): if len(v)", "with open(path, 'r') as yaml_file: data = yaml.load(yaml_file, Loader=yaml.FullLoader) return data except FileNotFoundError:", "not load yaml at path: {path}') except Exception as e: raise e def", "load yaml at path: {path}') except Exception as e: raise e def parse_body(body):", "functions\"\"\" import urllib.parse, random, string def load_yaml(path): import yaml try: with open(path, 'r')", "body.items(): if len(v) == 1: body.update({k: v[0]}) return body def unquote_plus(text): return urllib.parse.unquote_plus(text)", "stringLength (int, optional): length of string. Defaults to 8. Returns: str: random string", "raise e def parse_body(body): body = urllib.parse.parse_qs(body) for k, v in body.items(): if", "as e: raise e def parse_body(body): body = urllib.parse.parse_qs(body) for k, v in", "unquote_plus(text): return urllib.parse.unquote_plus(text) def parse_url_path(url_path): reformat = url_path.replace('%2F', '/') reformat = reformat.replace('+', '", "(int, optional): length of string. Defaults to 8. Returns: str: random string \"\"\"", "import urllib.parse, random, string def load_yaml(path): import yaml try: with open(path, 'r') as", "reformat = reformat.replace('+', ' ') return reformat def random_string(stringLength=8): \"\"\"https://pynative.com/python-generate-random-string/ Args: stringLength (int,", "return urllib.parse.unquote_plus(text) def parse_url_path(url_path): reformat = url_path.replace('%2F', '/') reformat = reformat.replace('+', ' ')", "at path: {path}') except Exception as e: raise e def parse_body(body): body =", "return reformat def random_string(stringLength=8): \"\"\"https://pynative.com/python-generate-random-string/ Args: stringLength (int, optional): length of string. 
Defaults", "{path}') except Exception as e: raise e def parse_body(body): body = urllib.parse.parse_qs(body) for", "8. Returns: str: random string \"\"\" letters = string.ascii_lowercase return ''.join(random.choice(letters) for i", "yaml try: with open(path, 'r') as yaml_file: data = yaml.load(yaml_file, Loader=yaml.FullLoader) return data", "Loader=yaml.FullLoader) return data except FileNotFoundError: raise FileNotFoundError('could not load yaml at path: {path}')", "path: {path}') except Exception as e: raise e def parse_body(body): body = urllib.parse.parse_qs(body)", "reformat.replace('+', ' ') return reformat def random_string(stringLength=8): \"\"\"https://pynative.com/python-generate-random-string/ Args: stringLength (int, optional): length", "return body def unquote_plus(text): return urllib.parse.unquote_plus(text) def parse_url_path(url_path): reformat = url_path.replace('%2F', '/') reformat", "1: body.update({k: v[0]}) return body def unquote_plus(text): return urllib.parse.unquote_plus(text) def parse_url_path(url_path): reformat =", "raise FileNotFoundError('could not load yaml at path: {path}') except Exception as e: raise", "= urllib.parse.parse_qs(body) for k, v in body.items(): if len(v) == 1: body.update({k: v[0]})", "def load_yaml(path): import yaml try: with open(path, 'r') as yaml_file: data = yaml.load(yaml_file,", "reformat = url_path.replace('%2F', '/') reformat = reformat.replace('+', ' ') return reformat def random_string(stringLength=8):", "= url_path.replace('%2F', '/') reformat = reformat.replace('+', ' ') return reformat def random_string(stringLength=8): \"\"\"https://pynative.com/python-generate-random-string/", "reformat def random_string(stringLength=8): \"\"\"https://pynative.com/python-generate-random-string/ Args: stringLength (int, optional): length of string. Defaults to", "try: with open(path, 'r') as yaml_file: data = yaml.load(yaml_file, Loader=yaml.FullLoader) return data except", "== 1: body.update({k: v[0]}) return body def unquote_plus(text): return urllib.parse.unquote_plus(text) def parse_url_path(url_path): reformat", "Defaults to 8. 
Returns: str: random string \"\"\" letters = string.ascii_lowercase return ''.join(random.choice(letters)", "urllib.parse.parse_qs(body) for k, v in body.items(): if len(v) == 1: body.update({k: v[0]}) return", "v[0]}) return body def unquote_plus(text): return urllib.parse.unquote_plus(text) def parse_url_path(url_path): reformat = url_path.replace('%2F', '/')", "import yaml try: with open(path, 'r') as yaml_file: data = yaml.load(yaml_file, Loader=yaml.FullLoader) return", "v in body.items(): if len(v) == 1: body.update({k: v[0]}) return body def unquote_plus(text):", "str: random string \"\"\" letters = string.ascii_lowercase return ''.join(random.choice(letters) for i in range(stringLength))", "e: raise e def parse_body(body): body = urllib.parse.parse_qs(body) for k, v in body.items():", "len(v) == 1: body.update({k: v[0]}) return body def unquote_plus(text): return urllib.parse.unquote_plus(text) def parse_url_path(url_path):", "data except FileNotFoundError: raise FileNotFoundError('could not load yaml at path: {path}') except Exception", "yaml at path: {path}') except Exception as e: raise e def parse_body(body): body", "def parse_body(body): body = urllib.parse.parse_qs(body) for k, v in body.items(): if len(v) ==", "def unquote_plus(text): return urllib.parse.unquote_plus(text) def parse_url_path(url_path): reformat = url_path.replace('%2F', '/') reformat = reformat.replace('+',", "\"\"\"One-off functions\"\"\" import urllib.parse, random, string def load_yaml(path): import yaml try: with open(path,", "yaml.load(yaml_file, Loader=yaml.FullLoader) return data except FileNotFoundError: raise FileNotFoundError('could not load yaml at path:", "if len(v) == 1: body.update({k: v[0]}) return body def unquote_plus(text): return urllib.parse.unquote_plus(text) def", "random_string(stringLength=8): \"\"\"https://pynative.com/python-generate-random-string/ Args: stringLength (int, optional): length of string. Defaults to 8. Returns:", "open(path, 'r') as yaml_file: data = yaml.load(yaml_file, Loader=yaml.FullLoader) return data except FileNotFoundError: raise" ]
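A few illustrative calls to the helpers above; the query string and path are made-up examples, not values taken from the original code.

body = parse_body('station=Ottawa%2C+Canada&year=1962&year=1963')
# -> {'station': 'Ottawa, Canada', 'year': ['1962', '1963']}
# single-valued keys are flattened to strings, multi-valued keys stay lists

path = parse_url_path('ionograms%2F1962+scans')
# -> 'ionograms/1962 scans'

token = random_string(12)
# -> e.g. 'kqzvbmxwtroa' (12 random lowercase letters)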
[ "'value': 'Orroral Valley, Australia'}, {'label': _('Prince Albert, Canada'), 'value': 'Prince Albert, Canada'}, {'label':", "= {} for year in range(1962,1974): year_dict[year] = str(year) lat_dict = {} for", "Valley, Australia'}, {'label': _('Prince Albert, Canada'), 'value': 'Prince Albert, Canada'}, {'label': _('Ottawa, Canada'),", "'value': 'East Grand Forks, Minnesota'}, {'label': _('Rosman, No. Carolina'), 'value': 'Rosman, No. Carolina'},", "Point, Maryland'}, {'label': _('South Atlantic, Falkland Islands'), 'value': 'South Atlantic, Falkland Islands'}, {'label':", "x_axis_options = [ {'label': _('Date'), 'value': ('timestamp')}, {'label': _('Latitude'), 'value': ('lat')}, {'label': _('Longitude'),", "_('Kauai, Hawaii'), 'value': 'Kauai, Hawaii'}, {'label': _('Kashima, Japan'), 'value': 'Kashima, Japan'}] # Getting", "station in station_name_options: station_values.append(station['value']) x_axis_options = [ {'label': _('Date'), 'value': ('timestamp')}, {'label': _('Latitude'),", "{'label': _('Resolute Bay, No. W. Territories'), 'value': 'Resolute Bay, No. W. Territories'}, {'label':", "_('Ottawa, Canada'), 'value': 'Ottawa, Canada'}, {'label': _('Byrd Station, Antartica'), 'value': 'Byrd Station, Antartica'},", "_('Maximum Depth'), 'value': ('max_depth')}] year_dict = {} for year in range(1962,1974): year_dict[year] =", "_('South Point, Hawaiian Islands'), 'value': 'South Point, Hawaiian Islands'}, {'label': _('Lima, Peru'), 'value':", "hard coded into app.py in the translate_static function. Babel could not translate from", "Bay, No. W. Territories'}, {'label': _('Blossom Point, Maryland'), 'value': 'Blossom Point, Maryland'}, {'label':", "('max_depth')}] year_dict = {} for year in range(1962,1974): year_dict[year] = str(year) lat_dict =", "of Alaska, Fairbanks, Alaska'}, {'label': _('Darwin, Australia'), 'value': 'Darwin, Australia'}, {'label': _('Quito, Ecuador'),", "Station, Antartica'), 'value': 'Byrd Station, Antartica'}, {'label': _('Las Palmas, Canary Island'), 'value': 'Las", "Fairbanks, Alaska'), 'value': 'College, Fairbanks, Alaska'}, {'label': _('Woomera, Australia'), 'value': 'Woomera, Australia'}, {'label':", "year_dict[year] = str(year) lat_dict = {} for lat in range(-90, 90+1, 15): lat_dict[lat]", "flask_babel import Babel, _ # Controls for webapp station_name_options = [ {'label': _('Resolute", "_('Mojave, California'), 'value': 'Mojave, California'}, {'label': _('Kauai, Hawaii'), 'value': 'Kauai, Hawaii'}, {'label': _('Kashima,", "Minnesota'}, {'label': _('Rosman, No. Carolina'), 'value': 'Rosman, No. Carolina'}, {'label': _('College, Fairbanks, Alaska'),", "'value': 'South Atlantic, Falkland Islands'}, {'label': _(\"St. John's, Newfoundland\"), 'value': \"St. 
John's, Newfoundland\"},", "Australia'}, {'label': _('Gilmore Creek, Fairbanks, Alaska'), 'value': 'Gilmore Creek, Fairbanks, Alaska'}, {'label': _('Tromso,", "{'label': _('University of Alaska, Fairbanks, Alaska'), 'value': 'University of Alaska, Fairbanks, Alaska'}, {'label':", "_('Blossom Point, Maryland'), 'value': 'Blossom Point, Maryland'}, {'label': _('South Atlantic, Falkland Islands'), 'value':", "_('University of Alaska, Fairbanks, Alaska'), 'value': 'University of Alaska, Fairbanks, Alaska'}, {'label': _('Darwin,", "Canada'), 'value': 'Ottawa, Canada'}, {'label': _('Byrd Station, Antartica'), 'value': 'Byrd Station, Antartica'}, {'label':", "California'), 'value': 'Mojave, California'}, {'label': _('Kauai, Hawaii'), 'value': 'Kauai, Hawaii'}, {'label': _('Kashima, Japan'),", "Point, Hawaiian Islands'), 'value': 'South Point, Hawaiian Islands'}, {'label': _('Lima, Peru'), 'value': 'Lima,", "_('Latitude'), 'value': ('lat')}, {'label': _('Longitude'), 'value': ('lon')}] y_axis_options = [ {'label': _('Minimum Frequency'),", "('lon')}] y_axis_options = [ {'label': _('Minimum Frequency'), 'value': ('fmin')}, {'label': _('Maximum Depth'), 'value':", "Norway'}, {'label': _('University of Alaska, Fairbanks, Alaska'), 'value': 'University of Alaska, Fairbanks, Alaska'},", "could not translate from this file. from flask_babel import Babel, _ # Controls", "Valley, Australia'), 'value': 'Orroral Valley, Australia'}, {'label': _('Prince Albert, Canada'), 'value': 'Prince Albert,", "function. Babel could not translate from this file. from flask_babel import Babel, _", "to be hard coded into app.py in the translate_static function. Babel could not", "{'label': _('Longitude'), 'value': ('lon')}] y_axis_options = [ {'label': _('Minimum Frequency'), 'value': ('fmin')}, {'label':", "Bay, No. W. Territories'), 'value': 'Resolute Bay, No. W. Territories'}, {'label': _('Blossom Point,", "_('Gilmore Creek, Fairbanks, Alaska'), 'value': 'Gilmore Creek, Fairbanks, Alaska'}, {'label': _('Tromso, Norway'), 'value':", "('fmin')}, {'label': _('Maximum Depth'), 'value': ('max_depth')}] year_dict = {} for year in range(1962,1974):", "'Woomera, Australia'}, {'label': _('Gilmore Creek, Fairbanks, Alaska'), 'value': 'Gilmore Creek, Fairbanks, Alaska'}, {'label':", "[ {'label': _('Date'), 'value': ('timestamp')}, {'label': _('Latitude'), 'value': ('lat')}, {'label': _('Longitude'), 'value': ('lon')}]", "{'label': _('Maximum Depth'), 'value': ('max_depth')}] year_dict = {} for year in range(1962,1974): year_dict[year]", "_('Bretigny, France'), 'value': 'Bretigny, France'}, {'label': _('Singapore, Malaysia'), 'value': 'Singapore, Malaysia'}, {'label': _('Boulder,", "had to be hard coded into app.py in the translate_static function. Babel could", "'Tromso, Norway'}, {'label': _('University of Alaska, Fairbanks, Alaska'), 'value': 'University of Alaska, Fairbanks,", "'South Point, Hawaiian Islands'}, {'label': _('Lima, Peru'), 'value': 'Lima, Peru'}, {'label': _('Johannesburg, South", "values of the station names station_values = [] for station in station_name_options: station_values.append(station['value'])", "No. W. Territories'}, {'label': _('Blossom Point, Maryland'), 'value': 'Blossom Point, Maryland'}, {'label': _('South", "Chile'), 'value': 'Antofagasta, Chile'}, {'label': _('East Grand Forks, Minnesota'), 'value': 'East Grand Forks,", "[ {'label': _('Minimum Frequency'), 'value': ('fmin')}, {'label': _('Maximum Depth'), 'value': ('max_depth')}] year_dict =", "'Rosman, No. 
Carolina'}, {'label': _('College, Fairbanks, Alaska'), 'value': 'College, Fairbanks, Alaska'}, {'label': _('Woomera,", "range(-90, 90+1, 15): lat_dict[lat] = str(lat) lon_dict = {} for lon in range(-180,", "# Getting only the values of the station names station_values = [] for", "Depth'), 'value': ('max_depth')}] year_dict = {} for year in range(1962,1974): year_dict[year] = str(year)", "France'}, {'label': _('Singapore, Malaysia'), 'value': 'Singapore, Malaysia'}, {'label': _('Boulder, Colorado'), 'value': 'Boulder, Colorado'},", "Canada'), 'value': 'Prince Albert, Canada'}, {'label': _('Ottawa, Canada'), 'value': 'Ottawa, Canada'}, {'label': _('Byrd", "'South Atlantic, Falkland Islands'}, {'label': _(\"St. John's, Newfoundland\"), 'value': \"St. John's, Newfoundland\"}, {'label':", "Fairbanks, Alaska'}, {'label': _('Tromso, Norway'), 'value': 'Tromso, Norway'}, {'label': _('University of Alaska, Fairbanks,", "Atlantic, Falkland Islands'}, {'label': _(\"St. John's, Newfoundland\"), 'value': \"St. John's, Newfoundland\"}, {'label': _('Orroral", "{'label': _('Byrd Station, Antartica'), 'value': 'Byrd Station, Antartica'}, {'label': _('Las Palmas, Canary Island'),", "Alaska'}, {'label': _('Woomera, Australia'), 'value': 'Woomera, Australia'}, {'label': _('Gilmore Creek, Fairbanks, Alaska'), 'value':", "Hawaii'), 'value': 'Kauai, Hawaii'}, {'label': _('Kashima, Japan'), 'value': 'Kashima, Japan'}] # Getting only", "names station_values = [] for station in station_name_options: station_values.append(station['value']) x_axis_options = [ {'label':", "Falkland Islands'}, {'label': _(\"St. John's, Newfoundland\"), 'value': \"St. John's, Newfoundland\"}, {'label': _('Orroral Valley,", "for webapp station_name_options = [ {'label': _('Resolute Bay, No. W. Territories'), 'value': 'Resolute", "Australia'}, {'label': _('Prince Albert, Canada'), 'value': 'Prince Albert, Canada'}, {'label': _('Ottawa, Canada'), 'value':", "{'label': _('South Point, Hawaiian Islands'), 'value': 'South Point, Hawaiian Islands'}, {'label': _('Lima, Peru'),", "'East Grand Forks, Minnesota'}, {'label': _('Rosman, No. Carolina'), 'value': 'Rosman, No. Carolina'}, {'label':", "Hawaiian Islands'}, {'label': _('Lima, Peru'), 'value': 'Lima, Peru'}, {'label': _('Johannesburg, South Africa'), 'value':", "_('Antofagasta, Chile'), 'value': 'Antofagasta, Chile'}, {'label': _('East Grand Forks, Minnesota'), 'value': 'East Grand", "'Kano, Nigeria'}, {'label': _('Tananarive, Madagascar'), 'value': 'Tananarive, Madagascar'}, {'label': _('Bretigny, France'), 'value': 'Bretigny,", "{'label': _('South Atlantic, Falkland Islands'), 'value': 'South Atlantic, Falkland Islands'}, {'label': _(\"St. 
John's,", "{'label': _('Mojave, California'), 'value': 'Mojave, California'}, {'label': _('Kauai, Hawaii'), 'value': 'Kauai, Hawaii'}, {'label':", "Florida'}, {'label': _('Antofagasta, Chile'), 'value': 'Antofagasta, Chile'}, {'label': _('East Grand Forks, Minnesota'), 'value':", "{'label': _('Blossom Point, Maryland'), 'value': 'Blossom Point, Maryland'}, {'label': _('South Atlantic, Falkland Islands'),", "'Tananarive, Madagascar'}, {'label': _('Bretigny, France'), 'value': 'Bretigny, France'}, {'label': _('Singapore, Malaysia'), 'value': 'Singapore,", "{'label': _('Woomera, Australia'), 'value': 'Woomera, Australia'}, {'label': _('Gilmore Creek, Fairbanks, Alaska'), 'value': 'Gilmore", "{'label': _('Minimum Frequency'), 'value': ('fmin')}, {'label': _('Maximum Depth'), 'value': ('max_depth')}] year_dict = {}", "'value': 'Mojave, California'}, {'label': _('Kauai, Hawaii'), 'value': 'Kauai, Hawaii'}, {'label': _('Kashima, Japan'), 'value':", "{'label': _('Kauai, Hawaii'), 'value': 'Kauai, Hawaii'}, {'label': _('Kashima, Japan'), 'value': 'Kashima, Japan'}] #", "No. Carolina'}, {'label': _('College, Fairbanks, Alaska'), 'value': 'College, Fairbanks, Alaska'}, {'label': _('Woomera, Australia'),", "station_values.append(station['value']) x_axis_options = [ {'label': _('Date'), 'value': ('timestamp')}, {'label': _('Latitude'), 'value': ('lat')}, {'label':", "Carolina'}, {'label': _('College, Fairbanks, Alaska'), 'value': 'College, Fairbanks, Alaska'}, {'label': _('Woomera, Australia'), 'value':", "{} for lat in range(-90, 90+1, 15): lat_dict[lat] = str(lat) lon_dict = {}", "Controls had to be hard coded into app.py in the translate_static function. Babel", "Getting only the values of the station names station_values = [] for station", "Myers, Florida'}, {'label': _('Antofagasta, Chile'), 'value': 'Antofagasta, Chile'}, {'label': _('East Grand Forks, Minnesota'),", "translate from this file. from flask_babel import Babel, _ # Controls for webapp", "_('Johannesburg, South Africa'), 'value': 'Johannesburg, South Africa'}, {'label': _('Kano, Nigeria'), 'value': 'Kano, Nigeria'},", "Myers, Florida'), 'value': 'Fort Myers, Florida'}, {'label': _('Antofagasta, Chile'), 'value': 'Antofagasta, Chile'}, {'label':", "{'label': _('Winkfield, England'), 'value': 'Winkfield, England'}, {'label': _('Fort Myers, Florida'), 'value': 'Fort Myers,", "'value': 'University of Alaska, Fairbanks, Alaska'}, {'label': _('Darwin, Australia'), 'value': 'Darwin, Australia'}, {'label':", "'value': 'Johannesburg, South Africa'}, {'label': _('Kano, Nigeria'), 'value': 'Kano, Nigeria'}, {'label': _('Tananarive, Madagascar'),", "{'label': _('Bretigny, France'), 'value': 'Bretigny, France'}, {'label': _('Singapore, Malaysia'), 'value': 'Singapore, Malaysia'}, {'label':", "is depreciated. Controls had to be hard coded into app.py in the translate_static", "'value': 'College, Fairbanks, Alaska'}, {'label': _('Woomera, Australia'), 'value': 'Woomera, Australia'}, {'label': _('Gilmore Creek,", "= [] for station in station_name_options: station_values.append(station['value']) x_axis_options = [ {'label': _('Date'), 'value':", "{'label': _('Fort Myers, Florida'), 'value': 'Fort Myers, Florida'}, {'label': _('Antofagasta, Chile'), 'value': 'Antofagasta,", "coded into app.py in the translate_static function. 
Babel could not translate from this", "Alaska'), 'value': 'Gilmore Creek, Fairbanks, Alaska'}, {'label': _('Tromso, Norway'), 'value': 'Tromso, Norway'}, {'label':", "{'label': _('East Grand Forks, Minnesota'), 'value': 'East Grand Forks, Minnesota'}, {'label': _('Rosman, No.", "Canada'}, {'label': _('Ottawa, Canada'), 'value': 'Ottawa, Canada'}, {'label': _('Byrd Station, Antartica'), 'value': 'Byrd", "this file. from flask_babel import Babel, _ # Controls for webapp station_name_options =", "Carolina'), 'value': 'Rosman, No. Carolina'}, {'label': _('College, Fairbanks, Alaska'), 'value': 'College, Fairbanks, Alaska'},", "lat_dict = {} for lat in range(-90, 90+1, 15): lat_dict[lat] = str(lat) lon_dict", "{'label': _('Tananarive, Madagascar'), 'value': 'Tananarive, Madagascar'}, {'label': _('Bretigny, France'), 'value': 'Bretigny, France'}, {'label':", "'value': 'Rosman, No. Carolina'}, {'label': _('College, Fairbanks, Alaska'), 'value': 'College, Fairbanks, Alaska'}, {'label':", "str(lat) lon_dict = {} for lon in range(-180, 180+1, 30): lon_dict[lon] = str(lon)", "Hawaii'}, {'label': _('Kashima, Japan'), 'value': 'Kashima, Japan'}] # Getting only the values of", "= str(year) lat_dict = {} for lat in range(-90, 90+1, 15): lat_dict[lat] =", "'Lima, Peru'}, {'label': _('Johannesburg, South Africa'), 'value': 'Johannesburg, South Africa'}, {'label': _('Kano, Nigeria'),", "Madagascar'), 'value': 'Tananarive, Madagascar'}, {'label': _('Bretigny, France'), 'value': 'Bretigny, France'}, {'label': _('Singapore, Malaysia'),", "{'label': _('Singapore, Malaysia'), 'value': 'Singapore, Malaysia'}, {'label': _('Boulder, Colorado'), 'value': 'Boulder, Colorado'}, {'label':", "Alaska'), 'value': 'University of Alaska, Fairbanks, Alaska'}, {'label': _('Darwin, Australia'), 'value': 'Darwin, Australia'},", "Islands'), 'value': 'South Point, Hawaiian Islands'}, {'label': _('Lima, Peru'), 'value': 'Lima, Peru'}, {'label':", "Canary Island'}, {'label': _('Winkfield, England'), 'value': 'Winkfield, England'}, {'label': _('Fort Myers, Florida'), 'value':", "No. W. Territories'), 'value': 'Resolute Bay, No. W. Territories'}, {'label': _('Blossom Point, Maryland'),", "Albert, Canada'), 'value': 'Prince Albert, Canada'}, {'label': _('Ottawa, Canada'), 'value': 'Ottawa, Canada'}, {'label':", "Forks, Minnesota'), 'value': 'East Grand Forks, Minnesota'}, {'label': _('Rosman, No. 
Carolina'), 'value': 'Rosman,", "South Africa'), 'value': 'Johannesburg, South Africa'}, {'label': _('Kano, Nigeria'), 'value': 'Kano, Nigeria'}, {'label':", "station_values = [] for station in station_name_options: station_values.append(station['value']) x_axis_options = [ {'label': _('Date'),", "Point, Hawaiian Islands'}, {'label': _('Lima, Peru'), 'value': 'Lima, Peru'}, {'label': _('Johannesburg, South Africa'),", "Canary Island'), 'value': 'Las Palmas, Canary Island'}, {'label': _('Winkfield, England'), 'value': 'Winkfield, England'},", "range(1962,1974): year_dict[year] = str(year) lat_dict = {} for lat in range(-90, 90+1, 15):", "Creek, Fairbanks, Alaska'), 'value': 'Gilmore Creek, Fairbanks, Alaska'}, {'label': _('Tromso, Norway'), 'value': 'Tromso,", "of Alaska, Fairbanks, Alaska'), 'value': 'University of Alaska, Fairbanks, Alaska'}, {'label': _('Darwin, Australia'),", "Fairbanks, Alaska'), 'value': 'Gilmore Creek, Fairbanks, Alaska'}, {'label': _('Tromso, Norway'), 'value': 'Tromso, Norway'},", "'Darwin, Australia'}, {'label': _('Quito, Ecuador'), 'value': 'Quito, Ecuador'}, {'label': _('South Point, Hawaiian Islands'),", "{'label': _('Lima, Peru'), 'value': 'Lima, Peru'}, {'label': _('Johannesburg, South Africa'), 'value': 'Johannesburg, South", "_('Fort Myers, Florida'), 'value': 'Fort Myers, Florida'}, {'label': _('Antofagasta, Chile'), 'value': 'Antofagasta, Chile'},", "Palmas, Canary Island'), 'value': 'Las Palmas, Canary Island'}, {'label': _('Winkfield, England'), 'value': 'Winkfield,", "_('Tananarive, Madagascar'), 'value': 'Tananarive, Madagascar'}, {'label': _('Bretigny, France'), 'value': 'Bretigny, France'}, {'label': _('Singapore,", "{'label': _('Rosman, No. Carolina'), 'value': 'Rosman, No. Carolina'}, {'label': _('College, Fairbanks, Alaska'), 'value':", "_('Prince Albert, Canada'), 'value': 'Prince Albert, Canada'}, {'label': _('Ottawa, Canada'), 'value': 'Ottawa, Canada'},", "'Fort Myers, Florida'}, {'label': _('Antofagasta, Chile'), 'value': 'Antofagasta, Chile'}, {'label': _('East Grand Forks,", "('lat')}, {'label': _('Longitude'), 'value': ('lon')}] y_axis_options = [ {'label': _('Minimum Frequency'), 'value': ('fmin')},", "_('Date'), 'value': ('timestamp')}, {'label': _('Latitude'), 'value': ('lat')}, {'label': _('Longitude'), 'value': ('lon')}] y_axis_options =", "'Kashima, Japan'}] # Getting only the values of the station names station_values =", "Chile'}, {'label': _('East Grand Forks, Minnesota'), 'value': 'East Grand Forks, Minnesota'}, {'label': _('Rosman,", "app.py in the translate_static function. Babel could not translate from this file. from", "Fairbanks, Alaska'}, {'label': _('Darwin, Australia'), 'value': 'Darwin, Australia'}, {'label': _('Quito, Ecuador'), 'value': 'Quito,", "'value': 'Bretigny, France'}, {'label': _('Singapore, Malaysia'), 'value': 'Singapore, Malaysia'}, {'label': _('Boulder, Colorado'), 'value':", "in range(1962,1974): year_dict[year] = str(year) lat_dict = {} for lat in range(-90, 90+1,", "# Controls for webapp station_name_options = [ {'label': _('Resolute Bay, No. W. 
Territories'),", "Alaska, Fairbanks, Alaska'}, {'label': _('Darwin, Australia'), 'value': 'Darwin, Australia'}, {'label': _('Quito, Ecuador'), 'value':", "Alaska'), 'value': 'College, Fairbanks, Alaska'}, {'label': _('Woomera, Australia'), 'value': 'Woomera, Australia'}, {'label': _('Gilmore", "Malaysia'), 'value': 'Singapore, Malaysia'}, {'label': _('Boulder, Colorado'), 'value': 'Boulder, Colorado'}, {'label': _('Mojave, California'),", "'Kauai, Hawaii'}, {'label': _('Kashima, Japan'), 'value': 'Kashima, Japan'}] # Getting only the values", "station_name_options: station_values.append(station['value']) x_axis_options = [ {'label': _('Date'), 'value': ('timestamp')}, {'label': _('Latitude'), 'value': ('lat')},", "'Gilmore Creek, Fairbanks, Alaska'}, {'label': _('Tromso, Norway'), 'value': 'Tromso, Norway'}, {'label': _('University of", "Falkland Islands'), 'value': 'South Atlantic, Falkland Islands'}, {'label': _(\"St. John's, Newfoundland\"), 'value': \"St.", "'value': 'Quito, Ecuador'}, {'label': _('South Point, Hawaiian Islands'), 'value': 'South Point, Hawaiian Islands'},", "England'}, {'label': _('Fort Myers, Florida'), 'value': 'Fort Myers, Florida'}, {'label': _('Antofagasta, Chile'), 'value':", "Africa'), 'value': 'Johannesburg, South Africa'}, {'label': _('Kano, Nigeria'), 'value': 'Kano, Nigeria'}, {'label': _('Tananarive,", "_('Byrd Station, Antartica'), 'value': 'Byrd Station, Antartica'}, {'label': _('Las Palmas, Canary Island'), 'value':", "file is depreciated. Controls had to be hard coded into app.py in the", "webapp station_name_options = [ {'label': _('Resolute Bay, No. W. Territories'), 'value': 'Resolute Bay,", "{'label': _('Orroral Valley, Australia'), 'value': 'Orroral Valley, Australia'}, {'label': _('Prince Albert, Canada'), 'value':", "in the translate_static function. Babel could not translate from this file. from flask_babel", "Fairbanks, Alaska'}, {'label': _('Woomera, Australia'), 'value': 'Woomera, Australia'}, {'label': _('Gilmore Creek, Fairbanks, Alaska'),", "'value': ('lon')}] y_axis_options = [ {'label': _('Minimum Frequency'), 'value': ('fmin')}, {'label': _('Maximum Depth'),", "'University of Alaska, Fairbanks, Alaska'}, {'label': _('Darwin, Australia'), 'value': 'Darwin, Australia'}, {'label': _('Quito,", "{'label': _('Kano, Nigeria'), 'value': 'Kano, Nigeria'}, {'label': _('Tananarive, Madagascar'), 'value': 'Tananarive, Madagascar'}, {'label':", "for station in station_name_options: station_values.append(station['value']) x_axis_options = [ {'label': _('Date'), 'value': ('timestamp')}, {'label':", "Newfoundland\"), 'value': \"St. John's, Newfoundland\"}, {'label': _('Orroral Valley, Australia'), 'value': 'Orroral Valley, Australia'},", "= [ {'label': _('Minimum Frequency'), 'value': ('fmin')}, {'label': _('Maximum Depth'), 'value': ('max_depth')}] year_dict", "_('Orroral Valley, Australia'), 'value': 'Orroral Valley, Australia'}, {'label': _('Prince Albert, Canada'), 'value': 'Prince", "depreciated. Controls had to be hard coded into app.py in the translate_static function.", "Island'}, {'label': _('Winkfield, England'), 'value': 'Winkfield, England'}, {'label': _('Fort Myers, Florida'), 'value': 'Fort", "the translate_static function. Babel could not translate from this file. 
from flask_babel import", "Norway'), 'value': 'Tromso, Norway'}, {'label': _('University of Alaska, Fairbanks, Alaska'), 'value': 'University of", "'Orroral Valley, Australia'}, {'label': _('Prince Albert, Canada'), 'value': 'Prince Albert, Canada'}, {'label': _('Ottawa,", "'value': 'Ottawa, Canada'}, {'label': _('Byrd Station, Antartica'), 'value': 'Byrd Station, Antartica'}, {'label': _('Las", "'value': 'Antofagasta, Chile'}, {'label': _('East Grand Forks, Minnesota'), 'value': 'East Grand Forks, Minnesota'},", "'value': 'Boulder, Colorado'}, {'label': _('Mojave, California'), 'value': 'Mojave, California'}, {'label': _('Kauai, Hawaii'), 'value':", "{'label': _(\"St. John's, Newfoundland\"), 'value': \"St. John's, Newfoundland\"}, {'label': _('Orroral Valley, Australia'), 'value':", "station names station_values = [] for station in station_name_options: station_values.append(station['value']) x_axis_options = [", "str(year) lat_dict = {} for lat in range(-90, 90+1, 15): lat_dict[lat] = str(lat)", "'Johannesburg, South Africa'}, {'label': _('Kano, Nigeria'), 'value': 'Kano, Nigeria'}, {'label': _('Tananarive, Madagascar'), 'value':", "of the station names station_values = [] for station in station_name_options: station_values.append(station['value']) x_axis_options", "in range(-90, 90+1, 15): lat_dict[lat] = str(lat) lon_dict = {} for lon in", "'Blossom Point, Maryland'}, {'label': _('South Atlantic, Falkland Islands'), 'value': 'South Atlantic, Falkland Islands'},", "not translate from this file. from flask_babel import Babel, _ # Controls for", "be hard coded into app.py in the translate_static function. Babel could not translate", "'value': 'Kano, Nigeria'}, {'label': _('Tananarive, Madagascar'), 'value': 'Tananarive, Madagascar'}, {'label': _('Bretigny, France'), 'value':", "Newfoundland\"}, {'label': _('Orroral Valley, Australia'), 'value': 'Orroral Valley, Australia'}, {'label': _('Prince Albert, Canada'),", "Albert, Canada'}, {'label': _('Ottawa, Canada'), 'value': 'Ottawa, Canada'}, {'label': _('Byrd Station, Antartica'), 'value':", "_('College, Fairbanks, Alaska'), 'value': 'College, Fairbanks, Alaska'}, {'label': _('Woomera, Australia'), 'value': 'Woomera, Australia'},", "Canada'}, {'label': _('Byrd Station, Antartica'), 'value': 'Byrd Station, Antartica'}, {'label': _('Las Palmas, Canary", "_('South Atlantic, Falkland Islands'), 'value': 'South Atlantic, Falkland Islands'}, {'label': _(\"St. John's, Newfoundland\"),", "Japan'), 'value': 'Kashima, Japan'}] # Getting only the values of the station names", "_('Resolute Bay, No. W. Territories'), 'value': 'Resolute Bay, No. W. Territories'}, {'label': _('Blossom", "John's, Newfoundland\"}, {'label': _('Orroral Valley, Australia'), 'value': 'Orroral Valley, Australia'}, {'label': _('Prince Albert,", "'Bretigny, France'}, {'label': _('Singapore, Malaysia'), 'value': 'Singapore, Malaysia'}, {'label': _('Boulder, Colorado'), 'value': 'Boulder,", "Australia'), 'value': 'Woomera, Australia'}, {'label': _('Gilmore Creek, Fairbanks, Alaska'), 'value': 'Gilmore Creek, Fairbanks,", "{'label': _('Tromso, Norway'), 'value': 'Tromso, Norway'}, {'label': _('University of Alaska, Fairbanks, Alaska'), 'value':", "{'label': _('Johannesburg, South Africa'), 'value': 'Johannesburg, South Africa'}, {'label': _('Kano, Nigeria'), 'value': 'Kano,", "Controls for webapp station_name_options = [ {'label': _('Resolute Bay, No. W. 
Territories'), 'value':", "_('Minimum Frequency'), 'value': ('fmin')}, {'label': _('Maximum Depth'), 'value': ('max_depth')}] year_dict = {} for", "_('Kashima, Japan'), 'value': 'Kashima, Japan'}] # Getting only the values of the station", "John's, Newfoundland\"), 'value': \"St. John's, Newfoundland\"}, {'label': _('Orroral Valley, Australia'), 'value': 'Orroral Valley,", "{'label': _('Quito, Ecuador'), 'value': 'Quito, Ecuador'}, {'label': _('South Point, Hawaiian Islands'), 'value': 'South", "{'label': _('Darwin, Australia'), 'value': 'Darwin, Australia'}, {'label': _('Quito, Ecuador'), 'value': 'Quito, Ecuador'}, {'label':", "{'label': _('Prince Albert, Canada'), 'value': 'Prince Albert, Canada'}, {'label': _('Ottawa, Canada'), 'value': 'Ottawa,", "\"St. John's, Newfoundland\"}, {'label': _('Orroral Valley, Australia'), 'value': 'Orroral Valley, Australia'}, {'label': _('Prince", "_ # Controls for webapp station_name_options = [ {'label': _('Resolute Bay, No. W.", "'value': 'Byrd Station, Antartica'}, {'label': _('Las Palmas, Canary Island'), 'value': 'Las Palmas, Canary", "Territories'}, {'label': _('Blossom Point, Maryland'), 'value': 'Blossom Point, Maryland'}, {'label': _('South Atlantic, Falkland", "'value': ('lat')}, {'label': _('Longitude'), 'value': ('lon')}] y_axis_options = [ {'label': _('Minimum Frequency'), 'value':", "{'label': _('Ottawa, Canada'), 'value': 'Ottawa, Canada'}, {'label': _('Byrd Station, Antartica'), 'value': 'Byrd Station,", "Islands'}, {'label': _(\"St. John's, Newfoundland\"), 'value': \"St. John's, Newfoundland\"}, {'label': _('Orroral Valley, Australia'),", "{'label': _('Las Palmas, Canary Island'), 'value': 'Las Palmas, Canary Island'}, {'label': _('Winkfield, England'),", "'Singapore, Malaysia'}, {'label': _('Boulder, Colorado'), 'value': 'Boulder, Colorado'}, {'label': _('Mojave, California'), 'value': 'Mojave,", "Forks, Minnesota'}, {'label': _('Rosman, No. Carolina'), 'value': 'Rosman, No. Carolina'}, {'label': _('College, Fairbanks,", "'value': 'Gilmore Creek, Fairbanks, Alaska'}, {'label': _('Tromso, Norway'), 'value': 'Tromso, Norway'}, {'label': _('University", "Maryland'), 'value': 'Blossom Point, Maryland'}, {'label': _('South Atlantic, Falkland Islands'), 'value': 'South Atlantic,", "Antartica'), 'value': 'Byrd Station, Antartica'}, {'label': _('Las Palmas, Canary Island'), 'value': 'Las Palmas,", "= str(lat) lon_dict = {} for lon in range(-180, 180+1, 30): lon_dict[lon] =", "'value': \"St. John's, Newfoundland\"}, {'label': _('Orroral Valley, Australia'), 'value': 'Orroral Valley, Australia'}, {'label':", "for year in range(1962,1974): year_dict[year] = str(year) lat_dict = {} for lat in", "'value': 'Lima, Peru'}, {'label': _('Johannesburg, South Africa'), 'value': 'Johannesburg, South Africa'}, {'label': _('Kano,", "from this file. from flask_babel import Babel, _ # Controls for webapp station_name_options", "15): lat_dict[lat] = str(lat) lon_dict = {} for lon in range(-180, 180+1, 30):", "Japan'}] # Getting only the values of the station names station_values = []", "_('Quito, Ecuador'), 'value': 'Quito, Ecuador'}, {'label': _('South Point, Hawaiian Islands'), 'value': 'South Point,", "{'label': _('Date'), 'value': ('timestamp')}, {'label': _('Latitude'), 'value': ('lat')}, {'label': _('Longitude'), 'value': ('lon')}] y_axis_options", "'value': ('timestamp')}, {'label': _('Latitude'), 'value': ('lat')}, {'label': _('Longitude'), 'value': ('lon')}] y_axis_options = [", "Babel could not translate from this file. 
from flask_babel import Babel, _ #", "_('Woomera, Australia'), 'value': 'Woomera, Australia'}, {'label': _('Gilmore Creek, Fairbanks, Alaska'), 'value': 'Gilmore Creek,", "France'), 'value': 'Bretigny, France'}, {'label': _('Singapore, Malaysia'), 'value': 'Singapore, Malaysia'}, {'label': _('Boulder, Colorado'),", "_('Las Palmas, Canary Island'), 'value': 'Las Palmas, Canary Island'}, {'label': _('Winkfield, England'), 'value':", "'value': 'Resolute Bay, No. W. Territories'}, {'label': _('Blossom Point, Maryland'), 'value': 'Blossom Point,", "'value': 'Kauai, Hawaii'}, {'label': _('Kashima, Japan'), 'value': 'Kashima, Japan'}] # Getting only the", "translate_static function. Babel could not translate from this file. from flask_babel import Babel,", "California'}, {'label': _('Kauai, Hawaii'), 'value': 'Kauai, Hawaii'}, {'label': _('Kashima, Japan'), 'value': 'Kashima, Japan'}]", "Grand Forks, Minnesota'}, {'label': _('Rosman, No. Carolina'), 'value': 'Rosman, No. Carolina'}, {'label': _('College,", "station_name_options = [ {'label': _('Resolute Bay, No. W. Territories'), 'value': 'Resolute Bay, No.", "Creek, Fairbanks, Alaska'}, {'label': _('Tromso, Norway'), 'value': 'Tromso, Norway'}, {'label': _('University of Alaska,", "into app.py in the translate_static function. Babel could not translate from this file.", "'value': 'Winkfield, England'}, {'label': _('Fort Myers, Florida'), 'value': 'Fort Myers, Florida'}, {'label': _('Antofagasta,", "Alaska'}, {'label': _('Darwin, Australia'), 'value': 'Darwin, Australia'}, {'label': _('Quito, Ecuador'), 'value': 'Quito, Ecuador'},", "year_dict = {} for year in range(1962,1974): year_dict[year] = str(year) lat_dict = {}", "year in range(1962,1974): year_dict[year] = str(year) lat_dict = {} for lat in range(-90,", "'value': 'South Point, Hawaiian Islands'}, {'label': _('Lima, Peru'), 'value': 'Lima, Peru'}, {'label': _('Johannesburg,", "'Antofagasta, Chile'}, {'label': _('East Grand Forks, Minnesota'), 'value': 'East Grand Forks, Minnesota'}, {'label':", "Ecuador'), 'value': 'Quito, Ecuador'}, {'label': _('South Point, Hawaiian Islands'), 'value': 'South Point, Hawaiian", "'Ottawa, Canada'}, {'label': _('Byrd Station, Antartica'), 'value': 'Byrd Station, Antartica'}, {'label': _('Las Palmas,", "This file is depreciated. Controls had to be hard coded into app.py in", "{'label': _('Gilmore Creek, Fairbanks, Alaska'), 'value': 'Gilmore Creek, Fairbanks, Alaska'}, {'label': _('Tromso, Norway'),", "= [ {'label': _('Resolute Bay, No. W. Territories'), 'value': 'Resolute Bay, No. W.", "'Resolute Bay, No. W. Territories'}, {'label': _('Blossom Point, Maryland'), 'value': 'Blossom Point, Maryland'},", "'value': ('max_depth')}] year_dict = {} for year in range(1962,1974): year_dict[year] = str(year) lat_dict", "{'label': _('Kashima, Japan'), 'value': 'Kashima, Japan'}] # Getting only the values of the", "'Prince Albert, Canada'}, {'label': _('Ottawa, Canada'), 'value': 'Ottawa, Canada'}, {'label': _('Byrd Station, Antartica'),", "90+1, 15): lat_dict[lat] = str(lat) lon_dict = {} for lon in range(-180, 180+1,", "Hawaiian Islands'), 'value': 'South Point, Hawaiian Islands'}, {'label': _('Lima, Peru'), 'value': 'Lima, Peru'},", "for lat in range(-90, 90+1, 15): lat_dict[lat] = str(lat) lon_dict = {} for", "'value': 'Tromso, Norway'}, {'label': _('University of Alaska, Fairbanks, Alaska'), 'value': 'University of Alaska,", "W. Territories'), 'value': 'Resolute Bay, No. W. 
Territories'}, {'label': _('Blossom Point, Maryland'), 'value':", "Florida'), 'value': 'Fort Myers, Florida'}, {'label': _('Antofagasta, Chile'), 'value': 'Antofagasta, Chile'}, {'label': _('East", "Antartica'}, {'label': _('Las Palmas, Canary Island'), 'value': 'Las Palmas, Canary Island'}, {'label': _('Winkfield,", "('timestamp')}, {'label': _('Latitude'), 'value': ('lat')}, {'label': _('Longitude'), 'value': ('lon')}] y_axis_options = [ {'label':", "'value': 'Singapore, Malaysia'}, {'label': _('Boulder, Colorado'), 'value': 'Boulder, Colorado'}, {'label': _('Mojave, California'), 'value':", "{} for year in range(1962,1974): year_dict[year] = str(year) lat_dict = {} for lat", "Colorado'), 'value': 'Boulder, Colorado'}, {'label': _('Mojave, California'), 'value': 'Mojave, California'}, {'label': _('Kauai, Hawaii'),", "No. Carolina'), 'value': 'Rosman, No. Carolina'}, {'label': _('College, Fairbanks, Alaska'), 'value': 'College, Fairbanks,", "file. from flask_babel import Babel, _ # Controls for webapp station_name_options = [", "lat in range(-90, 90+1, 15): lat_dict[lat] = str(lat) lon_dict = {} for lon", "South Africa'}, {'label': _('Kano, Nigeria'), 'value': 'Kano, Nigeria'}, {'label': _('Tananarive, Madagascar'), 'value': 'Tananarive,", "_('Darwin, Australia'), 'value': 'Darwin, Australia'}, {'label': _('Quito, Ecuador'), 'value': 'Quito, Ecuador'}, {'label': _('South", "[] for station in station_name_options: station_values.append(station['value']) x_axis_options = [ {'label': _('Date'), 'value': ('timestamp')},", "Nigeria'), 'value': 'Kano, Nigeria'}, {'label': _('Tananarive, Madagascar'), 'value': 'Tananarive, Madagascar'}, {'label': _('Bretigny, France'),", "{'label': _('Boulder, Colorado'), 'value': 'Boulder, Colorado'}, {'label': _('Mojave, California'), 'value': 'Mojave, California'}, {'label':", "Territories'), 'value': 'Resolute Bay, No. W. Territories'}, {'label': _('Blossom Point, Maryland'), 'value': 'Blossom", "_(\"St. John's, Newfoundland\"), 'value': \"St. John's, Newfoundland\"}, {'label': _('Orroral Valley, Australia'), 'value': 'Orroral", "_('Longitude'), 'value': ('lon')}] y_axis_options = [ {'label': _('Minimum Frequency'), 'value': ('fmin')}, {'label': _('Maximum", "_('Lima, Peru'), 'value': 'Lima, Peru'}, {'label': _('Johannesburg, South Africa'), 'value': 'Johannesburg, South Africa'},", "Maryland'}, {'label': _('South Atlantic, Falkland Islands'), 'value': 'South Atlantic, Falkland Islands'}, {'label': _(\"St.", "Station, Antartica'}, {'label': _('Las Palmas, Canary Island'), 'value': 'Las Palmas, Canary Island'}, {'label':", "'Boulder, Colorado'}, {'label': _('Mojave, California'), 'value': 'Mojave, California'}, {'label': _('Kauai, Hawaii'), 'value': 'Kauai,", "Fairbanks, Alaska'), 'value': 'University of Alaska, Fairbanks, Alaska'}, {'label': _('Darwin, Australia'), 'value': 'Darwin,", "'value': ('fmin')}, {'label': _('Maximum Depth'), 'value': ('max_depth')}] year_dict = {} for year in", "'Winkfield, England'}, {'label': _('Fort Myers, Florida'), 'value': 'Fort Myers, Florida'}, {'label': _('Antofagasta, Chile'),", "Islands'), 'value': 'South Atlantic, Falkland Islands'}, {'label': _(\"St. John's, Newfoundland\"), 'value': \"St. John's,", "Babel, _ # Controls for webapp station_name_options = [ {'label': _('Resolute Bay, No.", "Colorado'}, {'label': _('Mojave, California'), 'value': 'Mojave, California'}, {'label': _('Kauai, Hawaii'), 'value': 'Kauai, Hawaii'},", "Atlantic, Falkland Islands'), 'value': 'South Atlantic, Falkland Islands'}, {'label': _(\"St. 
John's, Newfoundland\"), 'value':", "Alaska'}, {'label': _('Tromso, Norway'), 'value': 'Tromso, Norway'}, {'label': _('University of Alaska, Fairbanks, Alaska'),", "Madagascar'}, {'label': _('Bretigny, France'), 'value': 'Bretigny, France'}, {'label': _('Singapore, Malaysia'), 'value': 'Singapore, Malaysia'},", "Grand Forks, Minnesota'), 'value': 'East Grand Forks, Minnesota'}, {'label': _('Rosman, No. Carolina'), 'value':", "Peru'), 'value': 'Lima, Peru'}, {'label': _('Johannesburg, South Africa'), 'value': 'Johannesburg, South Africa'}, {'label':", "{'label': _('College, Fairbanks, Alaska'), 'value': 'College, Fairbanks, Alaska'}, {'label': _('Woomera, Australia'), 'value': 'Woomera,", "Australia'), 'value': 'Darwin, Australia'}, {'label': _('Quito, Ecuador'), 'value': 'Quito, Ecuador'}, {'label': _('South Point,", "Point, Maryland'), 'value': 'Blossom Point, Maryland'}, {'label': _('South Atlantic, Falkland Islands'), 'value': 'South", "_('Boulder, Colorado'), 'value': 'Boulder, Colorado'}, {'label': _('Mojave, California'), 'value': 'Mojave, California'}, {'label': _('Kauai,", "from flask_babel import Babel, _ # Controls for webapp station_name_options = [ {'label':", "Nigeria'}, {'label': _('Tananarive, Madagascar'), 'value': 'Tananarive, Madagascar'}, {'label': _('Bretigny, France'), 'value': 'Bretigny, France'},", "_('Tromso, Norway'), 'value': 'Tromso, Norway'}, {'label': _('University of Alaska, Fairbanks, Alaska'), 'value': 'University", "[ {'label': _('Resolute Bay, No. W. Territories'), 'value': 'Resolute Bay, No. W. Territories'},", "# This file is depreciated. Controls had to be hard coded into app.py", "Minnesota'), 'value': 'East Grand Forks, Minnesota'}, {'label': _('Rosman, No. Carolina'), 'value': 'Rosman, No.", "'value': 'Fort Myers, Florida'}, {'label': _('Antofagasta, Chile'), 'value': 'Antofagasta, Chile'}, {'label': _('East Grand", "= {} for lat in range(-90, 90+1, 15): lat_dict[lat] = str(lat) lon_dict =", "lat_dict[lat] = str(lat) lon_dict = {} for lon in range(-180, 180+1, 30): lon_dict[lon]", "= [ {'label': _('Date'), 'value': ('timestamp')}, {'label': _('Latitude'), 'value': ('lat')}, {'label': _('Longitude'), 'value':", "'College, Fairbanks, Alaska'}, {'label': _('Woomera, Australia'), 'value': 'Woomera, Australia'}, {'label': _('Gilmore Creek, Fairbanks,", "Peru'}, {'label': _('Johannesburg, South Africa'), 'value': 'Johannesburg, South Africa'}, {'label': _('Kano, Nigeria'), 'value':", "in station_name_options: station_values.append(station['value']) x_axis_options = [ {'label': _('Date'), 'value': ('timestamp')}, {'label': _('Latitude'), 'value':", "'Mojave, California'}, {'label': _('Kauai, Hawaii'), 'value': 'Kauai, Hawaii'}, {'label': _('Kashima, Japan'), 'value': 'Kashima,", "Alaska, Fairbanks, Alaska'), 'value': 'University of Alaska, Fairbanks, Alaska'}, {'label': _('Darwin, Australia'), 'value':", "Africa'}, {'label': _('Kano, Nigeria'), 'value': 'Kano, Nigeria'}, {'label': _('Tananarive, Madagascar'), 'value': 'Tananarive, Madagascar'},", "{'label': _('Antofagasta, Chile'), 'value': 'Antofagasta, Chile'}, {'label': _('East Grand Forks, Minnesota'), 'value': 'East", "only the values of the station names station_values = [] for station in", "Australia'), 'value': 'Orroral Valley, Australia'}, {'label': _('Prince Albert, Canada'), 'value': 'Prince Albert, Canada'},", "'value': 'Las Palmas, Canary Island'}, {'label': _('Winkfield, England'), 'value': 'Winkfield, England'}, {'label': _('Fort", "Island'), 'value': 'Las Palmas, Canary Island'}, {'label': 
_('Winkfield, England'), 'value': 'Winkfield, England'}, {'label':", "y_axis_options = [ {'label': _('Minimum Frequency'), 'value': ('fmin')}, {'label': _('Maximum Depth'), 'value': ('max_depth')}]", "Palmas, Canary Island'}, {'label': _('Winkfield, England'), 'value': 'Winkfield, England'}, {'label': _('Fort Myers, Florida'),", "'value': 'Woomera, Australia'}, {'label': _('Gilmore Creek, Fairbanks, Alaska'), 'value': 'Gilmore Creek, Fairbanks, Alaska'},", "{'label': _('Latitude'), 'value': ('lat')}, {'label': _('Longitude'), 'value': ('lon')}] y_axis_options = [ {'label': _('Minimum", "'Byrd Station, Antartica'}, {'label': _('Las Palmas, Canary Island'), 'value': 'Las Palmas, Canary Island'},", "'value': 'Darwin, Australia'}, {'label': _('Quito, Ecuador'), 'value': 'Quito, Ecuador'}, {'label': _('South Point, Hawaiian", "Australia'}, {'label': _('Quito, Ecuador'), 'value': 'Quito, Ecuador'}, {'label': _('South Point, Hawaiian Islands'), 'value':", "import Babel, _ # Controls for webapp station_name_options = [ {'label': _('Resolute Bay,", "Ecuador'}, {'label': _('South Point, Hawaiian Islands'), 'value': 'South Point, Hawaiian Islands'}, {'label': _('Lima,", "'value': 'Prince Albert, Canada'}, {'label': _('Ottawa, Canada'), 'value': 'Ottawa, Canada'}, {'label': _('Byrd Station,", "the values of the station names station_values = [] for station in station_name_options:", "_('Rosman, No. Carolina'), 'value': 'Rosman, No. Carolina'}, {'label': _('College, Fairbanks, Alaska'), 'value': 'College,", "England'), 'value': 'Winkfield, England'}, {'label': _('Fort Myers, Florida'), 'value': 'Fort Myers, Florida'}, {'label':", "Malaysia'}, {'label': _('Boulder, Colorado'), 'value': 'Boulder, Colorado'}, {'label': _('Mojave, California'), 'value': 'Mojave, California'},", "Islands'}, {'label': _('Lima, Peru'), 'value': 'Lima, Peru'}, {'label': _('Johannesburg, South Africa'), 'value': 'Johannesburg,", "W. Territories'}, {'label': _('Blossom Point, Maryland'), 'value': 'Blossom Point, Maryland'}, {'label': _('South Atlantic,", "_('Kano, Nigeria'), 'value': 'Kano, Nigeria'}, {'label': _('Tananarive, Madagascar'), 'value': 'Tananarive, Madagascar'}, {'label': _('Bretigny,", "_('Singapore, Malaysia'), 'value': 'Singapore, Malaysia'}, {'label': _('Boulder, Colorado'), 'value': 'Boulder, Colorado'}, {'label': _('Mojave,", "_('Winkfield, England'), 'value': 'Winkfield, England'}, {'label': _('Fort Myers, Florida'), 'value': 'Fort Myers, Florida'},", "'value': 'Kashima, Japan'}] # Getting only the values of the station names station_values", "_('East Grand Forks, Minnesota'), 'value': 'East Grand Forks, Minnesota'}, {'label': _('Rosman, No. Carolina'),", "'Quito, Ecuador'}, {'label': _('South Point, Hawaiian Islands'), 'value': 'South Point, Hawaiian Islands'}, {'label':", "Frequency'), 'value': ('fmin')}, {'label': _('Maximum Depth'), 'value': ('max_depth')}] year_dict = {} for year", "'value': 'Blossom Point, Maryland'}, {'label': _('South Atlantic, Falkland Islands'), 'value': 'South Atlantic, Falkland", "'Las Palmas, Canary Island'}, {'label': _('Winkfield, England'), 'value': 'Winkfield, England'}, {'label': _('Fort Myers,", "'value': 'Tananarive, Madagascar'}, {'label': _('Bretigny, France'), 'value': 'Bretigny, France'}, {'label': _('Singapore, Malaysia'), 'value':", "the station names station_values = [] for station in station_name_options: station_values.append(station['value']) x_axis_options =" ]
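# Illustrative sketch only (not part of the original controls file): the option
# lists above already use the {'label': ..., 'value': ...} shape that a Dash
# dcc.Dropdown expects, so a layout could consume them roughly as shown below.
# The demo_app object and the component ids ('station-dropdown', 'x-axis-dropdown',
# 'y-axis-dropdown') are assumed names introduced for this example only.
import dash
from dash import dcc, html

demo_app = dash.Dash(__name__)
demo_app.layout = html.Div([
    # station_name_options / x_axis_options / y_axis_options come from the
    # definitions above; station_values[0] is just a default selection.
    dcc.Dropdown(id='station-dropdown', options=station_name_options,
                 value=station_values[0]),
    dcc.Dropdown(id='x-axis-dropdown', options=x_axis_options, value='timestamp'),
    dcc.Dropdown(id='y-axis-dropdown', options=y_axis_options, value='fmin'),
])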
[ "model = TrpCollection template_name = 'browsing/generic_detail.html' class TrpCollectionCreate(BaseCreateView): model = TrpCollection form_class =", "reverse_lazy('archiv:trppage_browse') @method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(TrpPageDelete, self).dispatch(*args, **kwargs) class TrpTranscriptListView(GenericListView): model", "= TrpPageFilterFormHelper table_class = TrpPageTable init_columns = [ 'id', 'part_of', 'page_nr', ] enable_merge", "import login_required from django.utils.decorators import method_decorator from django.urls import reverse, reverse_lazy from django.views.generic.detail", "'webpage/confirm_delete.html' success_url = reverse_lazy('archiv:trpdocument_browse') @method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(TrpDocumentDelete, self).dispatch(*args, **kwargs)", "= TrpDocumentTable init_columns = [ 'id', 'title', ] enable_merge = True class TrpDocumentDetailView(BaseDetailView):", "enable_merge = True class TrpCollectionDetailView(BaseDetailView): model = TrpCollection template_name = 'browsing/generic_detail.html' class TrpCollectionCreate(BaseCreateView):", "self).dispatch(*args, **kwargs) class TrpCollectionUpdate(BaseUpdateView): model = TrpCollection form_class = TrpCollectionForm @method_decorator(login_required) def dispatch(self,", "dispatch(self, *args, **kwargs): return super(TrpCollectionCreate, self).dispatch(*args, **kwargs) class TrpCollectionUpdate(BaseUpdateView): model = TrpCollection form_class", "dispatch(self, *args, **kwargs): return super(TrpPageUpdate, self).dispatch(*args, **kwargs) class TrpPageDelete(DeleteView): model = TrpPage template_name", "TrpTranscript filter_class = TrpTranscriptListFilter formhelper_class = TrpTranscriptFilterFormHelper table_class = TrpTranscriptTable init_columns = [", "= 'webpage/confirm_delete.html' success_url = reverse_lazy('archiv:trpdocument_browse') @method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(TrpDocumentDelete, self).dispatch(*args,", "reverse_lazy('archiv:trpcollection_browse') @method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(TrpCollectionDelete, self).dispatch(*args, **kwargs) class TrpDocumentListView(GenericListView): model", "= TrpTranscriptListFilter formhelper_class = TrpTranscriptFilterFormHelper table_class = TrpTranscriptTable init_columns = [ 'id', 'id',", "enable_merge = True class TrpTranscriptDetailView(BaseDetailView): model = TrpTranscript template_name = 'browsing/generic_detail.html' class TrpTranscriptCreate(BaseCreateView):", "class TrpCollectionDetailView(BaseDetailView): model = TrpCollection template_name = 'browsing/generic_detail.html' class TrpCollectionCreate(BaseCreateView): model = TrpCollection", "self).dispatch(*args, **kwargs) class TrpPageListView(GenericListView): model = TrpPage filter_class = TrpPageListFilter formhelper_class = TrpPageFilterFormHelper", "TrpTranscriptTable init_columns = [ 'id', 'id', ] enable_merge = True class TrpTranscriptDetailView(BaseDetailView): model", "TrpDocumentCreate(BaseCreateView): model = TrpDocument form_class = TrpDocumentForm @method_decorator(login_required) def dispatch(self, *args, **kwargs): return", "= True class TrpTranscriptDetailView(BaseDetailView): model = TrpTranscript template_name = 'browsing/generic_detail.html' class TrpTranscriptCreate(BaseCreateView): model", "= TrpTranscriptTable init_columns = [ 'id', 'id', ] enable_merge = True class 
TrpTranscriptDetailView(BaseDetailView):", "TrpTranscriptListView(GenericListView): model = TrpTranscript filter_class = TrpTranscriptListFilter formhelper_class = TrpTranscriptFilterFormHelper table_class = TrpTranscriptTable", "= TrpTranscript form_class = TrpTranscriptForm @method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(TrpTranscriptCreate, self).dispatch(*args,", "= TrpCollectionFilterFormHelper table_class = TrpCollectionTable init_columns = [ 'id', 'name', ] enable_merge =", "enable_merge = True class TrpPageDetailView(BaseDetailView): model = TrpPage template_name = 'browsing/generic_detail.html' class TrpPageCreate(BaseCreateView):", "form_class = TrpDocumentForm @method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(TrpDocumentUpdate, self).dispatch(*args, **kwargs) class", "'webpage/confirm_delete.html' success_url = reverse_lazy('archiv:trpcollection_browse') @method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(TrpCollectionDelete, self).dispatch(*args, **kwargs)", "enable_merge = True class TrpDocumentDetailView(BaseDetailView): model = TrpDocument template_name = 'browsing/generic_detail.html' class TrpDocumentCreate(BaseCreateView):", "**kwargs): return super(TrpDocumentCreate, self).dispatch(*args, **kwargs) class TrpDocumentUpdate(BaseUpdateView): model = TrpDocument form_class = TrpDocumentForm", "@method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(TrpCollectionCreate, self).dispatch(*args, **kwargs) class TrpCollectionUpdate(BaseUpdateView): model =", "**kwargs) class TrpTranscriptUpdate(BaseUpdateView): model = TrpTranscript form_class = TrpTranscriptForm @method_decorator(login_required) def dispatch(self, *args,", "dispatch(self, *args, **kwargs): return super(TrpDocumentUpdate, self).dispatch(*args, **kwargs) class TrpDocumentDelete(DeleteView): model = TrpDocument template_name", "class TrpDocumentDetailView(BaseDetailView): model = TrpDocument template_name = 'browsing/generic_detail.html' class TrpDocumentCreate(BaseCreateView): model = TrpDocument", "@method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(TrpTranscriptCreate, self).dispatch(*args, **kwargs) class TrpTranscriptUpdate(BaseUpdateView): model =", "= TrpPageForm @method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(TrpPageUpdate, self).dispatch(*args, **kwargs) class TrpPageDelete(DeleteView):", "] enable_merge = True class TrpCollectionDetailView(BaseDetailView): model = TrpCollection template_name = 'browsing/generic_detail.html' class", "TrpCollectionForm @method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(TrpCollectionCreate, self).dispatch(*args, **kwargs) class TrpCollectionUpdate(BaseUpdateView): model", "TrpDocument template_name = 'webpage/confirm_delete.html' success_url = reverse_lazy('archiv:trpdocument_browse') @method_decorator(login_required) def dispatch(self, *args, **kwargs): return", "'browsing/generic_detail.html' class TrpPageCreate(BaseCreateView): model = TrpPage form_class = TrpPageForm @method_decorator(login_required) def dispatch(self, *args,", "'webpage/confirm_delete.html' success_url = reverse_lazy('archiv:trptranscript_browse') @method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(TrpTranscriptDelete, self).dispatch(*args, **kwargs)", "= TrpDocumentListFilter formhelper_class = TrpDocumentFilterFormHelper table_class = 
TrpDocumentTable init_columns = [ 'id', 'title',", "= [ 'id', 'id', ] enable_merge = True class TrpTranscriptDetailView(BaseDetailView): model = TrpTranscript", "= True class TrpCollectionDetailView(BaseDetailView): model = TrpCollection template_name = 'browsing/generic_detail.html' class TrpCollectionCreate(BaseCreateView): model", "table_class = TrpTranscriptTable init_columns = [ 'id', 'id', ] enable_merge = True class", "*args, **kwargs): return super(TrpTranscriptCreate, self).dispatch(*args, **kwargs) class TrpTranscriptUpdate(BaseUpdateView): model = TrpTranscript form_class =", "] enable_merge = True class TrpPageDetailView(BaseDetailView): model = TrpPage template_name = 'browsing/generic_detail.html' class", "= 'browsing/generic_detail.html' class TrpTranscriptCreate(BaseCreateView): model = TrpTranscript form_class = TrpTranscriptForm @method_decorator(login_required) def dispatch(self,", "**kwargs) class TrpCollectionUpdate(BaseUpdateView): model = TrpCollection form_class = TrpCollectionForm @method_decorator(login_required) def dispatch(self, *args,", "formhelper_class = TrpDocumentFilterFormHelper table_class = TrpDocumentTable init_columns = [ 'id', 'title', ] enable_merge", "TrpPageForm @method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(TrpPageUpdate, self).dispatch(*args, **kwargs) class TrpPageDelete(DeleteView): model", "[ 'id', 'title', ] enable_merge = True class TrpDocumentDetailView(BaseDetailView): model = TrpDocument template_name", "*args, **kwargs): return super(TrpPageDelete, self).dispatch(*args, **kwargs) class TrpTranscriptListView(GenericListView): model = TrpTranscript filter_class =", "*args, **kwargs): return super(TrpCollectionDelete, self).dispatch(*args, **kwargs) class TrpDocumentListView(GenericListView): model = TrpDocument filter_class =", "**kwargs) class TrpDocumentListView(GenericListView): model = TrpDocument filter_class = TrpDocumentListFilter formhelper_class = TrpDocumentFilterFormHelper table_class", "TrpTranscriptForm @method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(TrpTranscriptUpdate, self).dispatch(*args, **kwargs) class TrpTranscriptDelete(DeleteView): model", "self).dispatch(*args, **kwargs) class TrpTranscriptListView(GenericListView): model = TrpTranscript filter_class = TrpTranscriptListFilter formhelper_class = TrpTranscriptFilterFormHelper", "reverse, reverse_lazy from django.views.generic.detail import DetailView from django.views.generic.edit import DeleteView from . filters", "= 'webpage/confirm_delete.html' success_url = reverse_lazy('archiv:trpcollection_browse') @method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(TrpCollectionDelete, self).dispatch(*args,", "**kwargs): return super(TrpPageDelete, self).dispatch(*args, **kwargs) class TrpTranscriptListView(GenericListView): model = TrpTranscript filter_class = TrpTranscriptListFilter", "= True class TrpPageDetailView(BaseDetailView): model = TrpPage template_name = 'browsing/generic_detail.html' class TrpPageCreate(BaseCreateView): model", "TrpCollection template_name = 'webpage/confirm_delete.html' success_url = reverse_lazy('archiv:trpcollection_browse') @method_decorator(login_required) def dispatch(self, *args, **kwargs): return", "dispatch(self, *args, **kwargs): return super(TrpPageCreate, self).dispatch(*args, **kwargs) class TrpPageUpdate(BaseUpdateView): model = TrpPage form_class", ". forms import * from . tables import * from . 
models import", "def dispatch(self, *args, **kwargs): return super(TrpDocumentUpdate, self).dispatch(*args, **kwargs) class TrpDocumentDelete(DeleteView): model = TrpDocument", "= 'browsing/generic_detail.html' class TrpPageCreate(BaseCreateView): model = TrpPage form_class = TrpPageForm @method_decorator(login_required) def dispatch(self,", "TrpPageTable init_columns = [ 'id', 'part_of', 'page_nr', ] enable_merge = True class TrpPageDetailView(BaseDetailView):", "appcreator from django.contrib.auth.decorators import login_required from django.utils.decorators import method_decorator from django.urls import reverse,", "TrpPageListFilter formhelper_class = TrpPageFilterFormHelper table_class = TrpPageTable init_columns = [ 'id', 'part_of', 'page_nr',", "from . filters import * from . forms import * from . tables", "model = TrpTranscript form_class = TrpTranscriptForm @method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(TrpTranscriptUpdate,", "from . forms import * from . tables import * from . models", "TrpCollection, TrpDocument, TrpPage, TrpTranscript ) from browsing.browsing_utils import ( GenericListView, BaseCreateView, BaseUpdateView, BaseDetailView", "self).dispatch(*args, **kwargs) class TrpTranscriptDelete(DeleteView): model = TrpTranscript template_name = 'webpage/confirm_delete.html' success_url = reverse_lazy('archiv:trptranscript_browse')", "dispatch(self, *args, **kwargs): return super(TrpCollectionDelete, self).dispatch(*args, **kwargs) class TrpDocumentListView(GenericListView): model = TrpDocument filter_class", "TrpTranscript template_name = 'webpage/confirm_delete.html' success_url = reverse_lazy('archiv:trptranscript_browse') @method_decorator(login_required) def dispatch(self, *args, **kwargs): return", "TrpTranscriptForm @method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(TrpTranscriptCreate, self).dispatch(*args, **kwargs) class TrpTranscriptUpdate(BaseUpdateView): model", ". models import ( TrpCollection, TrpDocument, TrpPage, TrpTranscript ) from browsing.browsing_utils import (", "import DeleteView from . filters import * from . 
forms import * from", "**kwargs): return super(TrpCollectionCreate, self).dispatch(*args, **kwargs) class TrpCollectionUpdate(BaseUpdateView): model = TrpCollection form_class = TrpCollectionForm", "model = TrpCollection filter_class = TrpCollectionListFilter formhelper_class = TrpCollectionFilterFormHelper table_class = TrpCollectionTable init_columns", "init_columns = [ 'id', 'name', ] enable_merge = True class TrpCollectionDetailView(BaseDetailView): model =", "'name', ] enable_merge = True class TrpCollectionDetailView(BaseDetailView): model = TrpCollection template_name = 'browsing/generic_detail.html'", "**kwargs): return super(TrpDocumentUpdate, self).dispatch(*args, **kwargs) class TrpDocumentDelete(DeleteView): model = TrpDocument template_name = 'webpage/confirm_delete.html'", "return super(TrpCollectionCreate, self).dispatch(*args, **kwargs) class TrpCollectionUpdate(BaseUpdateView): model = TrpCollection form_class = TrpCollectionForm @method_decorator(login_required)", "super(TrpTranscriptCreate, self).dispatch(*args, **kwargs) class TrpTranscriptUpdate(BaseUpdateView): model = TrpTranscript form_class = TrpTranscriptForm @method_decorator(login_required) def", "model = TrpTranscript form_class = TrpTranscriptForm @method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(TrpTranscriptCreate,", "TrpDocumentDelete(DeleteView): model = TrpDocument template_name = 'webpage/confirm_delete.html' success_url = reverse_lazy('archiv:trpdocument_browse') @method_decorator(login_required) def dispatch(self,", "( GenericListView, BaseCreateView, BaseUpdateView, BaseDetailView ) class TrpCollectionListView(GenericListView): model = TrpCollection filter_class =", "TrpCollectionDelete(DeleteView): model = TrpCollection template_name = 'webpage/confirm_delete.html' success_url = reverse_lazy('archiv:trpcollection_browse') @method_decorator(login_required) def dispatch(self,", "= [ 'id', 'name', ] enable_merge = True class TrpCollectionDetailView(BaseDetailView): model = TrpCollection", "from django.views.generic.edit import DeleteView from . filters import * from . forms import", "**kwargs): return super(TrpTranscriptUpdate, self).dispatch(*args, **kwargs) class TrpTranscriptDelete(DeleteView): model = TrpTranscript template_name = 'webpage/confirm_delete.html'", "= 'browsing/generic_detail.html' class TrpCollectionCreate(BaseCreateView): model = TrpCollection form_class = TrpCollectionForm @method_decorator(login_required) def dispatch(self,", "[ 'id', 'name', ] enable_merge = True class TrpCollectionDetailView(BaseDetailView): model = TrpCollection template_name", "filter_class = TrpTranscriptListFilter formhelper_class = TrpTranscriptFilterFormHelper table_class = TrpTranscriptTable init_columns = [ 'id',", "from django.utils.decorators import method_decorator from django.urls import reverse, reverse_lazy from django.views.generic.detail import DetailView", "model = TrpDocument form_class = TrpDocumentForm @method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(TrpDocumentUpdate,", "( TrpCollection, TrpDocument, TrpPage, TrpTranscript ) from browsing.browsing_utils import ( GenericListView, BaseCreateView, BaseUpdateView,", "django.views.generic.edit import DeleteView from . filters import * from . 
forms import *", "form_class = TrpTranscriptForm @method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(TrpTranscriptCreate, self).dispatch(*args, **kwargs) class", "TrpPageFilterFormHelper table_class = TrpPageTable init_columns = [ 'id', 'part_of', 'page_nr', ] enable_merge =", "= TrpDocument form_class = TrpDocumentForm @method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(TrpDocumentUpdate, self).dispatch(*args,", "model = TrpDocument template_name = 'webpage/confirm_delete.html' success_url = reverse_lazy('archiv:trpdocument_browse') @method_decorator(login_required) def dispatch(self, *args,", "def dispatch(self, *args, **kwargs): return super(TrpDocumentDelete, self).dispatch(*args, **kwargs) class TrpPageListView(GenericListView): model = TrpPage", "TrpDocumentDetailView(BaseDetailView): model = TrpDocument template_name = 'browsing/generic_detail.html' class TrpDocumentCreate(BaseCreateView): model = TrpDocument form_class", "@method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(TrpTranscriptUpdate, self).dispatch(*args, **kwargs) class TrpTranscriptDelete(DeleteView): model =", "BaseUpdateView, BaseDetailView ) class TrpCollectionListView(GenericListView): model = TrpCollection filter_class = TrpCollectionListFilter formhelper_class =", "model = TrpCollection form_class = TrpCollectionForm @method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(TrpCollectionUpdate,", "import * from . models import ( TrpCollection, TrpDocument, TrpPage, TrpTranscript ) from", "TrpDocumentForm @method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(TrpDocumentCreate, self).dispatch(*args, **kwargs) class TrpDocumentUpdate(BaseUpdateView): model", "TrpPage, TrpTranscript ) from browsing.browsing_utils import ( GenericListView, BaseCreateView, BaseUpdateView, BaseDetailView ) class", "TrpCollection filter_class = TrpCollectionListFilter formhelper_class = TrpCollectionFilterFormHelper table_class = TrpCollectionTable init_columns = [", "filters import * from . forms import * from . 
tables import *", "class TrpDocumentUpdate(BaseUpdateView): model = TrpDocument form_class = TrpDocumentForm @method_decorator(login_required) def dispatch(self, *args, **kwargs):", "TrpTranscript ) from browsing.browsing_utils import ( GenericListView, BaseCreateView, BaseUpdateView, BaseDetailView ) class TrpCollectionListView(GenericListView):", "django.utils.decorators import method_decorator from django.urls import reverse, reverse_lazy from django.views.generic.detail import DetailView from", "] enable_merge = True class TrpTranscriptDetailView(BaseDetailView): model = TrpTranscript template_name = 'browsing/generic_detail.html' class", "def dispatch(self, *args, **kwargs): return super(TrpTranscriptUpdate, self).dispatch(*args, **kwargs) class TrpTranscriptDelete(DeleteView): model = TrpTranscript", "generated by appcreator from django.contrib.auth.decorators import login_required from django.utils.decorators import method_decorator from django.urls", "TrpDocumentFilterFormHelper table_class = TrpDocumentTable init_columns = [ 'id', 'title', ] enable_merge = True", "@method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(TrpDocumentDelete, self).dispatch(*args, **kwargs) class TrpPageListView(GenericListView): model =", "class TrpDocumentListView(GenericListView): model = TrpDocument filter_class = TrpDocumentListFilter formhelper_class = TrpDocumentFilterFormHelper table_class =", "model = TrpPage template_name = 'browsing/generic_detail.html' class TrpPageCreate(BaseCreateView): model = TrpPage form_class =", "template_name = 'webpage/confirm_delete.html' success_url = reverse_lazy('archiv:trppage_browse') @method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(TrpPageDelete,", "] enable_merge = True class TrpDocumentDetailView(BaseDetailView): model = TrpDocument template_name = 'browsing/generic_detail.html' class", "'browsing/generic_detail.html' class TrpDocumentCreate(BaseCreateView): model = TrpDocument form_class = TrpDocumentForm @method_decorator(login_required) def dispatch(self, *args,", "class TrpTranscriptDetailView(BaseDetailView): model = TrpTranscript template_name = 'browsing/generic_detail.html' class TrpTranscriptCreate(BaseCreateView): model = TrpTranscript", "self).dispatch(*args, **kwargs) class TrpDocumentDelete(DeleteView): model = TrpDocument template_name = 'webpage/confirm_delete.html' success_url = reverse_lazy('archiv:trpdocument_browse')", "TrpPage template_name = 'browsing/generic_detail.html' class TrpPageCreate(BaseCreateView): model = TrpPage form_class = TrpPageForm @method_decorator(login_required)", "TrpDocumentUpdate(BaseUpdateView): model = TrpDocument form_class = TrpDocumentForm @method_decorator(login_required) def dispatch(self, *args, **kwargs): return", "@method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(TrpDocumentUpdate, self).dispatch(*args, **kwargs) class TrpDocumentDelete(DeleteView): model =", "def dispatch(self, *args, **kwargs): return super(TrpPageDelete, self).dispatch(*args, **kwargs) class TrpTranscriptListView(GenericListView): model = TrpTranscript", "TrpPageCreate(BaseCreateView): model = TrpPage form_class = TrpPageForm @method_decorator(login_required) def dispatch(self, *args, **kwargs): return", "dispatch(self, *args, **kwargs): return super(TrpPageDelete, self).dispatch(*args, **kwargs) class TrpTranscriptListView(GenericListView): model = TrpTranscript filter_class", "**kwargs) class TrpTranscriptDelete(DeleteView): model = 
TrpTranscript template_name = 'webpage/confirm_delete.html' success_url = reverse_lazy('archiv:trptranscript_browse') @method_decorator(login_required)", "= TrpDocumentForm @method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(TrpDocumentCreate, self).dispatch(*args, **kwargs) class TrpDocumentUpdate(BaseUpdateView):", "return super(TrpPageUpdate, self).dispatch(*args, **kwargs) class TrpPageDelete(DeleteView): model = TrpPage template_name = 'webpage/confirm_delete.html' success_url", "dispatch(self, *args, **kwargs): return super(TrpTranscriptUpdate, self).dispatch(*args, **kwargs) class TrpTranscriptDelete(DeleteView): model = TrpTranscript template_name", "self).dispatch(*args, **kwargs) class TrpDocumentUpdate(BaseUpdateView): model = TrpDocument form_class = TrpDocumentForm @method_decorator(login_required) def dispatch(self,", "= TrpPage form_class = TrpPageForm @method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(TrpPageUpdate, self).dispatch(*args,", "def dispatch(self, *args, **kwargs): return super(TrpDocumentCreate, self).dispatch(*args, **kwargs) class TrpDocumentUpdate(BaseUpdateView): model = TrpDocument", "super(TrpDocumentDelete, self).dispatch(*args, **kwargs) class TrpPageListView(GenericListView): model = TrpPage filter_class = TrpPageListFilter formhelper_class =", "class TrpCollectionListView(GenericListView): model = TrpCollection filter_class = TrpCollectionListFilter formhelper_class = TrpCollectionFilterFormHelper table_class =", ") class TrpCollectionListView(GenericListView): model = TrpCollection filter_class = TrpCollectionListFilter formhelper_class = TrpCollectionFilterFormHelper table_class", "True class TrpCollectionDetailView(BaseDetailView): model = TrpCollection template_name = 'browsing/generic_detail.html' class TrpCollectionCreate(BaseCreateView): model =", "@method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(TrpPageCreate, self).dispatch(*args, **kwargs) class TrpPageUpdate(BaseUpdateView): model =", "= TrpPageTable init_columns = [ 'id', 'part_of', 'page_nr', ] enable_merge = True class", "return super(TrpDocumentUpdate, self).dispatch(*args, **kwargs) class TrpDocumentDelete(DeleteView): model = TrpDocument template_name = 'webpage/confirm_delete.html' success_url", ". tables import * from . 
models import ( TrpCollection, TrpDocument, TrpPage, TrpTranscript", "filter_class = TrpPageListFilter formhelper_class = TrpPageFilterFormHelper table_class = TrpPageTable init_columns = [ 'id',", "import ( TrpCollection, TrpDocument, TrpPage, TrpTranscript ) from browsing.browsing_utils import ( GenericListView, BaseCreateView,", "TrpCollectionTable init_columns = [ 'id', 'name', ] enable_merge = True class TrpCollectionDetailView(BaseDetailView): model", "= TrpDocument template_name = 'browsing/generic_detail.html' class TrpDocumentCreate(BaseCreateView): model = TrpDocument form_class = TrpDocumentForm", "TrpDocument form_class = TrpDocumentForm @method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(TrpDocumentCreate, self).dispatch(*args, **kwargs)", "init_columns = [ 'id', 'title', ] enable_merge = True class TrpDocumentDetailView(BaseDetailView): model =", "@method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(TrpPageUpdate, self).dispatch(*args, **kwargs) class TrpPageDelete(DeleteView): model =", "TrpDocumentListView(GenericListView): model = TrpDocument filter_class = TrpDocumentListFilter formhelper_class = TrpDocumentFilterFormHelper table_class = TrpDocumentTable", "self).dispatch(*args, **kwargs) class TrpDocumentListView(GenericListView): model = TrpDocument filter_class = TrpDocumentListFilter formhelper_class = TrpDocumentFilterFormHelper", "form_class = TrpPageForm @method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(TrpPageCreate, self).dispatch(*args, **kwargs) class", "model = TrpPage template_name = 'webpage/confirm_delete.html' success_url = reverse_lazy('archiv:trppage_browse') @method_decorator(login_required) def dispatch(self, *args,", "TrpTranscript form_class = TrpTranscriptForm @method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(TrpTranscriptUpdate, self).dispatch(*args, **kwargs)", "**kwargs) class TrpDocumentUpdate(BaseUpdateView): model = TrpDocument form_class = TrpDocumentForm @method_decorator(login_required) def dispatch(self, *args,", "**kwargs) class TrpDocumentDelete(DeleteView): model = TrpDocument template_name = 'webpage/confirm_delete.html' success_url = reverse_lazy('archiv:trpdocument_browse') @method_decorator(login_required)", "**kwargs) class TrpTranscriptListView(GenericListView): model = TrpTranscript filter_class = TrpTranscriptListFilter formhelper_class = TrpTranscriptFilterFormHelper table_class", "TrpTranscript form_class = TrpTranscriptForm @method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(TrpTranscriptCreate, self).dispatch(*args, **kwargs)", "def dispatch(self, *args, **kwargs): return super(TrpPageCreate, self).dispatch(*args, **kwargs) class TrpPageUpdate(BaseUpdateView): model = TrpPage", "= TrpPageForm @method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(TrpPageCreate, self).dispatch(*args, **kwargs) class TrpPageUpdate(BaseUpdateView):", "tables import * from . 
models import ( TrpCollection, TrpDocument, TrpPage, TrpTranscript )", "by appcreator from django.contrib.auth.decorators import login_required from django.utils.decorators import method_decorator from django.urls import", "*args, **kwargs): return super(TrpCollectionCreate, self).dispatch(*args, **kwargs) class TrpCollectionUpdate(BaseUpdateView): model = TrpCollection form_class =", "def dispatch(self, *args, **kwargs): return super(TrpCollectionUpdate, self).dispatch(*args, **kwargs) class TrpCollectionDelete(DeleteView): model = TrpCollection", "DetailView from django.views.generic.edit import DeleteView from . filters import * from . forms", "DeleteView from . filters import * from . forms import * from .", "TrpCollectionListView(GenericListView): model = TrpCollection filter_class = TrpCollectionListFilter formhelper_class = TrpCollectionFilterFormHelper table_class = TrpCollectionTable", "[ 'id', 'id', ] enable_merge = True class TrpTranscriptDetailView(BaseDetailView): model = TrpTranscript template_name", "= TrpPage template_name = 'browsing/generic_detail.html' class TrpPageCreate(BaseCreateView): model = TrpPage form_class = TrpPageForm", "success_url = reverse_lazy('archiv:trpcollection_browse') @method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(TrpCollectionDelete, self).dispatch(*args, **kwargs) class", "from . tables import * from . models import ( TrpCollection, TrpDocument, TrpPage,", "class TrpTranscriptListView(GenericListView): model = TrpTranscript filter_class = TrpTranscriptListFilter formhelper_class = TrpTranscriptFilterFormHelper table_class =", "**kwargs): return super(TrpTranscriptCreate, self).dispatch(*args, **kwargs) class TrpTranscriptUpdate(BaseUpdateView): model = TrpTranscript form_class = TrpTranscriptForm", "model = TrpPage filter_class = TrpPageListFilter formhelper_class = TrpPageFilterFormHelper table_class = TrpPageTable init_columns", "import reverse, reverse_lazy from django.views.generic.detail import DetailView from django.views.generic.edit import DeleteView from .", "@method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(TrpPageDelete, self).dispatch(*args, **kwargs) class TrpTranscriptListView(GenericListView): model =", "= TrpDocument filter_class = TrpDocumentListFilter formhelper_class = TrpDocumentFilterFormHelper table_class = TrpDocumentTable init_columns =", "<reponame>csae8092/djtranskribus # generated by appcreator from django.contrib.auth.decorators import login_required from django.utils.decorators import method_decorator", "formhelper_class = TrpTranscriptFilterFormHelper table_class = TrpTranscriptTable init_columns = [ 'id', 'id', ] enable_merge", "= 'browsing/generic_detail.html' class TrpDocumentCreate(BaseCreateView): model = TrpDocument form_class = TrpDocumentForm @method_decorator(login_required) def dispatch(self,", "TrpCollection template_name = 'browsing/generic_detail.html' class TrpCollectionCreate(BaseCreateView): model = TrpCollection form_class = TrpCollectionForm @method_decorator(login_required)", "'browsing/generic_detail.html' class TrpCollectionCreate(BaseCreateView): model = TrpCollection form_class = TrpCollectionForm @method_decorator(login_required) def dispatch(self, *args,", "True class TrpDocumentDetailView(BaseDetailView): model = TrpDocument template_name = 'browsing/generic_detail.html' class TrpDocumentCreate(BaseCreateView): model =", "*args, **kwargs): return super(TrpDocumentUpdate, self).dispatch(*args, **kwargs) class 
TrpDocumentDelete(DeleteView): model = TrpDocument template_name =", "class TrpPageCreate(BaseCreateView): model = TrpPage form_class = TrpPageForm @method_decorator(login_required) def dispatch(self, *args, **kwargs):", "= TrpCollection template_name = 'browsing/generic_detail.html' class TrpCollectionCreate(BaseCreateView): model = TrpCollection form_class = TrpCollectionForm", "= TrpCollectionForm @method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(TrpCollectionCreate, self).dispatch(*args, **kwargs) class TrpCollectionUpdate(BaseUpdateView):", "return super(TrpDocumentDelete, self).dispatch(*args, **kwargs) class TrpPageListView(GenericListView): model = TrpPage filter_class = TrpPageListFilter formhelper_class", "= TrpCollection template_name = 'webpage/confirm_delete.html' success_url = reverse_lazy('archiv:trpcollection_browse') @method_decorator(login_required) def dispatch(self, *args, **kwargs):", "TrpCollectionFilterFormHelper table_class = TrpCollectionTable init_columns = [ 'id', 'name', ] enable_merge = True", "template_name = 'webpage/confirm_delete.html' success_url = reverse_lazy('archiv:trpdocument_browse') @method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(TrpDocumentDelete,", "def dispatch(self, *args, **kwargs): return super(TrpCollectionDelete, self).dispatch(*args, **kwargs) class TrpDocumentListView(GenericListView): model = TrpDocument", "model = TrpPage form_class = TrpPageForm @method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(TrpPageUpdate,", "TrpCollection form_class = TrpCollectionForm @method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(TrpCollectionCreate, self).dispatch(*args, **kwargs)", "import * from . forms import * from . 
tables import * from", "success_url = reverse_lazy('archiv:trppage_browse') @method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(TrpPageDelete, self).dispatch(*args, **kwargs) class", "success_url = reverse_lazy('archiv:trpdocument_browse') @method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(TrpDocumentDelete, self).dispatch(*args, **kwargs) class", "init_columns = [ 'id', 'part_of', 'page_nr', ] enable_merge = True class TrpPageDetailView(BaseDetailView): model", "from django.contrib.auth.decorators import login_required from django.utils.decorators import method_decorator from django.urls import reverse, reverse_lazy", "TrpCollectionUpdate(BaseUpdateView): model = TrpCollection form_class = TrpCollectionForm @method_decorator(login_required) def dispatch(self, *args, **kwargs): return", "super(TrpCollectionDelete, self).dispatch(*args, **kwargs) class TrpDocumentListView(GenericListView): model = TrpDocument filter_class = TrpDocumentListFilter formhelper_class =", "model = TrpPage form_class = TrpPageForm @method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(TrpPageCreate,", "class TrpPageListView(GenericListView): model = TrpPage filter_class = TrpPageListFilter formhelper_class = TrpPageFilterFormHelper table_class =", "filter_class = TrpDocumentListFilter formhelper_class = TrpDocumentFilterFormHelper table_class = TrpDocumentTable init_columns = [ 'id',", "= TrpTranscript form_class = TrpTranscriptForm @method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(TrpTranscriptUpdate, self).dispatch(*args,", "= 'webpage/confirm_delete.html' success_url = reverse_lazy('archiv:trptranscript_browse') @method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(TrpTranscriptDelete, self).dispatch(*args,", "* from . 
models import ( TrpCollection, TrpDocument, TrpPage, TrpTranscript ) from browsing.browsing_utils", "TrpPage form_class = TrpPageForm @method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(TrpPageCreate, self).dispatch(*args, **kwargs)", "'id', 'title', ] enable_merge = True class TrpDocumentDetailView(BaseDetailView): model = TrpDocument template_name =", "= reverse_lazy('archiv:trpcollection_browse') @method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(TrpCollectionDelete, self).dispatch(*args, **kwargs) class TrpDocumentListView(GenericListView):", "template_name = 'browsing/generic_detail.html' class TrpPageCreate(BaseCreateView): model = TrpPage form_class = TrpPageForm @method_decorator(login_required) def", "**kwargs): return super(TrpPageUpdate, self).dispatch(*args, **kwargs) class TrpPageDelete(DeleteView): model = TrpPage template_name = 'webpage/confirm_delete.html'", "# generated by appcreator from django.contrib.auth.decorators import login_required from django.utils.decorators import method_decorator from", "GenericListView, BaseCreateView, BaseUpdateView, BaseDetailView ) class TrpCollectionListView(GenericListView): model = TrpCollection filter_class = TrpCollectionListFilter", "'webpage/confirm_delete.html' success_url = reverse_lazy('archiv:trppage_browse') @method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(TrpPageDelete, self).dispatch(*args, **kwargs)", "template_name = 'browsing/generic_detail.html' class TrpTranscriptCreate(BaseCreateView): model = TrpTranscript form_class = TrpTranscriptForm @method_decorator(login_required) def", "BaseCreateView, BaseUpdateView, BaseDetailView ) class TrpCollectionListView(GenericListView): model = TrpCollection filter_class = TrpCollectionListFilter formhelper_class", "class TrpDocumentCreate(BaseCreateView): model = TrpDocument form_class = TrpDocumentForm @method_decorator(login_required) def dispatch(self, *args, **kwargs):", "django.views.generic.detail import DetailView from django.views.generic.edit import DeleteView from . filters import * from", "**kwargs): return super(TrpCollectionUpdate, self).dispatch(*args, **kwargs) class TrpCollectionDelete(DeleteView): model = TrpCollection template_name = 'webpage/confirm_delete.html'", "forms import * from . tables import * from . 
models import (", "model = TrpCollection template_name = 'webpage/confirm_delete.html' success_url = reverse_lazy('archiv:trpcollection_browse') @method_decorator(login_required) def dispatch(self, *args,", "*args, **kwargs): return super(TrpTranscriptUpdate, self).dispatch(*args, **kwargs) class TrpTranscriptDelete(DeleteView): model = TrpTranscript template_name =", "self).dispatch(*args, **kwargs) class TrpCollectionDelete(DeleteView): model = TrpCollection template_name = 'webpage/confirm_delete.html' success_url = reverse_lazy('archiv:trpcollection_browse')", "from django.urls import reverse, reverse_lazy from django.views.generic.detail import DetailView from django.views.generic.edit import DeleteView", "= TrpTranscript filter_class = TrpTranscriptListFilter formhelper_class = TrpTranscriptFilterFormHelper table_class = TrpTranscriptTable init_columns =", "table_class = TrpDocumentTable init_columns = [ 'id', 'title', ] enable_merge = True class", "TrpTranscriptFilterFormHelper table_class = TrpTranscriptTable init_columns = [ 'id', 'id', ] enable_merge = True", "'id', 'part_of', 'page_nr', ] enable_merge = True class TrpPageDetailView(BaseDetailView): model = TrpPage template_name", "reverse_lazy from django.views.generic.detail import DetailView from django.views.generic.edit import DeleteView from . filters import", "super(TrpDocumentCreate, self).dispatch(*args, **kwargs) class TrpDocumentUpdate(BaseUpdateView): model = TrpDocument form_class = TrpDocumentForm @method_decorator(login_required) def", "import * from . tables import * from . models import ( TrpCollection,", "return super(TrpCollectionUpdate, self).dispatch(*args, **kwargs) class TrpCollectionDelete(DeleteView): model = TrpCollection template_name = 'webpage/confirm_delete.html' success_url", "= TrpDocumentFilterFormHelper table_class = TrpDocumentTable init_columns = [ 'id', 'title', ] enable_merge =", "TrpPageUpdate(BaseUpdateView): model = TrpPage form_class = TrpPageForm @method_decorator(login_required) def dispatch(self, *args, **kwargs): return", "= TrpPage template_name = 'webpage/confirm_delete.html' success_url = reverse_lazy('archiv:trppage_browse') @method_decorator(login_required) def dispatch(self, *args, **kwargs):", "= [ 'id', 'title', ] enable_merge = True class TrpDocumentDetailView(BaseDetailView): model = TrpDocument", "template_name = 'browsing/generic_detail.html' class TrpCollectionCreate(BaseCreateView): model = TrpCollection form_class = TrpCollectionForm @method_decorator(login_required) def", "import ( GenericListView, BaseCreateView, BaseUpdateView, BaseDetailView ) class TrpCollectionListView(GenericListView): model = TrpCollection filter_class", ". filters import * from . forms import * from . 
tables import", "super(TrpPageUpdate, self).dispatch(*args, **kwargs) class TrpPageDelete(DeleteView): model = TrpPage template_name = 'webpage/confirm_delete.html' success_url =", "model = TrpDocument form_class = TrpDocumentForm @method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(TrpDocumentCreate,", "'id', 'id', ] enable_merge = True class TrpTranscriptDetailView(BaseDetailView): model = TrpTranscript template_name =", "super(TrpPageCreate, self).dispatch(*args, **kwargs) class TrpPageUpdate(BaseUpdateView): model = TrpPage form_class = TrpPageForm @method_decorator(login_required) def", "= TrpPage form_class = TrpPageForm @method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(TrpPageCreate, self).dispatch(*args,", "@method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(TrpCollectionUpdate, self).dispatch(*args, **kwargs) class TrpCollectionDelete(DeleteView): model =", "TrpPageDetailView(BaseDetailView): model = TrpPage template_name = 'browsing/generic_detail.html' class TrpPageCreate(BaseCreateView): model = TrpPage form_class", "dispatch(self, *args, **kwargs): return super(TrpDocumentDelete, self).dispatch(*args, **kwargs) class TrpPageListView(GenericListView): model = TrpPage filter_class", "*args, **kwargs): return super(TrpPageCreate, self).dispatch(*args, **kwargs) class TrpPageUpdate(BaseUpdateView): model = TrpPage form_class =", "TrpTranscript template_name = 'browsing/generic_detail.html' class TrpTranscriptCreate(BaseCreateView): model = TrpTranscript form_class = TrpTranscriptForm @method_decorator(login_required)", "@method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(TrpDocumentCreate, self).dispatch(*args, **kwargs) class TrpDocumentUpdate(BaseUpdateView): model =", "models import ( TrpCollection, TrpDocument, TrpPage, TrpTranscript ) from browsing.browsing_utils import ( GenericListView,", "class TrpCollectionCreate(BaseCreateView): model = TrpCollection form_class = TrpCollectionForm @method_decorator(login_required) def dispatch(self, *args, **kwargs):", "class TrpTranscriptCreate(BaseCreateView): model = TrpTranscript form_class = TrpTranscriptForm @method_decorator(login_required) def dispatch(self, *args, **kwargs):", "TrpCollectionDetailView(BaseDetailView): model = TrpCollection template_name = 'browsing/generic_detail.html' class TrpCollectionCreate(BaseCreateView): model = TrpCollection form_class", "class TrpPageDetailView(BaseDetailView): model = TrpPage template_name = 'browsing/generic_detail.html' class TrpPageCreate(BaseCreateView): model = TrpPage", "= True class TrpDocumentDetailView(BaseDetailView): model = TrpDocument template_name = 'browsing/generic_detail.html' class TrpDocumentCreate(BaseCreateView): model", "template_name = 'webpage/confirm_delete.html' success_url = reverse_lazy('archiv:trptranscript_browse') @method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(TrpTranscriptDelete,", "True class TrpPageDetailView(BaseDetailView): model = TrpPage template_name = 'browsing/generic_detail.html' class TrpPageCreate(BaseCreateView): model =", "model = TrpCollection form_class = TrpCollectionForm @method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(TrpCollectionCreate,", "dispatch(self, *args, **kwargs): return super(TrpDocumentCreate, self).dispatch(*args, **kwargs) class TrpDocumentUpdate(BaseUpdateView): model = TrpDocument form_class", "TrpPageForm 
@method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(TrpPageCreate, self).dispatch(*args, **kwargs) class TrpPageUpdate(BaseUpdateView): model", "= TrpCollection form_class = TrpCollectionForm @method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(TrpCollectionUpdate, self).dispatch(*args,", "BaseDetailView ) class TrpCollectionListView(GenericListView): model = TrpCollection filter_class = TrpCollectionListFilter formhelper_class = TrpCollectionFilterFormHelper", "django.urls import reverse, reverse_lazy from django.views.generic.detail import DetailView from django.views.generic.edit import DeleteView from", "**kwargs) class TrpCollectionDelete(DeleteView): model = TrpCollection template_name = 'webpage/confirm_delete.html' success_url = reverse_lazy('archiv:trpcollection_browse') @method_decorator(login_required)", "= TrpTranscriptForm @method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(TrpTranscriptUpdate, self).dispatch(*args, **kwargs) class TrpTranscriptDelete(DeleteView):", "def dispatch(self, *args, **kwargs): return super(TrpTranscriptCreate, self).dispatch(*args, **kwargs) class TrpTranscriptUpdate(BaseUpdateView): model = TrpTranscript", "super(TrpCollectionCreate, self).dispatch(*args, **kwargs) class TrpCollectionUpdate(BaseUpdateView): model = TrpCollection form_class = TrpCollectionForm @method_decorator(login_required) def", "table_class = TrpPageTable init_columns = [ 'id', 'part_of', 'page_nr', ] enable_merge = True", "browsing.browsing_utils import ( GenericListView, BaseCreateView, BaseUpdateView, BaseDetailView ) class TrpCollectionListView(GenericListView): model = TrpCollection", "* from . forms import * from . tables import * from .", "True class TrpTranscriptDetailView(BaseDetailView): model = TrpTranscript template_name = 'browsing/generic_detail.html' class TrpTranscriptCreate(BaseCreateView): model =", "self).dispatch(*args, **kwargs) class TrpTranscriptUpdate(BaseUpdateView): model = TrpTranscript form_class = TrpTranscriptForm @method_decorator(login_required) def dispatch(self,", "method_decorator from django.urls import reverse, reverse_lazy from django.views.generic.detail import DetailView from django.views.generic.edit import", "return super(TrpPageDelete, self).dispatch(*args, **kwargs) class TrpTranscriptListView(GenericListView): model = TrpTranscript filter_class = TrpTranscriptListFilter formhelper_class", "super(TrpCollectionUpdate, self).dispatch(*args, **kwargs) class TrpCollectionDelete(DeleteView): model = TrpCollection template_name = 'webpage/confirm_delete.html' success_url =", "reverse_lazy('archiv:trpdocument_browse') @method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(TrpDocumentDelete, self).dispatch(*args, **kwargs) class TrpPageListView(GenericListView): model", "*args, **kwargs): return super(TrpPageUpdate, self).dispatch(*args, **kwargs) class TrpPageDelete(DeleteView): model = TrpPage template_name =", "TrpTranscriptDelete(DeleteView): model = TrpTranscript template_name = 'webpage/confirm_delete.html' success_url = reverse_lazy('archiv:trptranscript_browse') @method_decorator(login_required) def dispatch(self,", "template_name = 'browsing/generic_detail.html' class TrpDocumentCreate(BaseCreateView): model = TrpDocument form_class = TrpDocumentForm @method_decorator(login_required) def", "TrpPageDelete(DeleteView): model = TrpPage template_name = 'webpage/confirm_delete.html' success_url = 
reverse_lazy('archiv:trppage_browse') @method_decorator(login_required) def dispatch(self,", "class TrpCollectionDelete(DeleteView): model = TrpCollection template_name = 'webpage/confirm_delete.html' success_url = reverse_lazy('archiv:trpcollection_browse') @method_decorator(login_required) def", "TrpPage template_name = 'webpage/confirm_delete.html' success_url = reverse_lazy('archiv:trppage_browse') @method_decorator(login_required) def dispatch(self, *args, **kwargs): return", "= TrpTranscript template_name = 'webpage/confirm_delete.html' success_url = reverse_lazy('archiv:trptranscript_browse') @method_decorator(login_required) def dispatch(self, *args, **kwargs):", "form_class = TrpTranscriptForm @method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(TrpTranscriptUpdate, self).dispatch(*args, **kwargs) class", ") from browsing.browsing_utils import ( GenericListView, BaseCreateView, BaseUpdateView, BaseDetailView ) class TrpCollectionListView(GenericListView): model", "*args, **kwargs): return super(TrpDocumentDelete, self).dispatch(*args, **kwargs) class TrpPageListView(GenericListView): model = TrpPage filter_class =", "model = TrpTranscript template_name = 'browsing/generic_detail.html' class TrpTranscriptCreate(BaseCreateView): model = TrpTranscript form_class =", "class TrpPageUpdate(BaseUpdateView): model = TrpPage form_class = TrpPageForm @method_decorator(login_required) def dispatch(self, *args, **kwargs):", "return super(TrpPageCreate, self).dispatch(*args, **kwargs) class TrpPageUpdate(BaseUpdateView): model = TrpPage form_class = TrpPageForm @method_decorator(login_required)", "= TrpCollectionListFilter formhelper_class = TrpCollectionFilterFormHelper table_class = TrpCollectionTable init_columns = [ 'id', 'name',", "return super(TrpDocumentCreate, self).dispatch(*args, **kwargs) class TrpDocumentUpdate(BaseUpdateView): model = TrpDocument form_class = TrpDocumentForm @method_decorator(login_required)", "from django.views.generic.detail import DetailView from django.views.generic.edit import DeleteView from . 
filters import *", "class TrpPageDelete(DeleteView): model = TrpPage template_name = 'webpage/confirm_delete.html' success_url = reverse_lazy('archiv:trppage_browse') @method_decorator(login_required) def", "= TrpCollectionTable init_columns = [ 'id', 'name', ] enable_merge = True class TrpCollectionDetailView(BaseDetailView):", "super(TrpTranscriptUpdate, self).dispatch(*args, **kwargs) class TrpTranscriptDelete(DeleteView): model = TrpTranscript template_name = 'webpage/confirm_delete.html' success_url =", "[ 'id', 'part_of', 'page_nr', ] enable_merge = True class TrpPageDetailView(BaseDetailView): model = TrpPage", "= TrpDocument form_class = TrpDocumentForm @method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(TrpDocumentCreate, self).dispatch(*args,", "self).dispatch(*args, **kwargs) class TrpPageUpdate(BaseUpdateView): model = TrpPage form_class = TrpPageForm @method_decorator(login_required) def dispatch(self,", "super(TrpPageDelete, self).dispatch(*args, **kwargs) class TrpTranscriptListView(GenericListView): model = TrpTranscript filter_class = TrpTranscriptListFilter formhelper_class =", "django.contrib.auth.decorators import login_required from django.utils.decorators import method_decorator from django.urls import reverse, reverse_lazy from", "= 'webpage/confirm_delete.html' success_url = reverse_lazy('archiv:trppage_browse') @method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(TrpPageDelete, self).dispatch(*args,", "'part_of', 'page_nr', ] enable_merge = True class TrpPageDetailView(BaseDetailView): model = TrpPage template_name =", "formhelper_class = TrpCollectionFilterFormHelper table_class = TrpCollectionTable init_columns = [ 'id', 'name', ] enable_merge", "model = TrpDocument filter_class = TrpDocumentListFilter formhelper_class = TrpDocumentFilterFormHelper table_class = TrpDocumentTable init_columns", "import DetailView from django.views.generic.edit import DeleteView from . filters import * from .", "TrpCollectionListFilter formhelper_class = TrpCollectionFilterFormHelper table_class = TrpCollectionTable init_columns = [ 'id', 'name', ]", "* from . tables import * from . 
models import ( TrpCollection, TrpDocument,", "TrpDocumentListFilter formhelper_class = TrpDocumentFilterFormHelper table_class = TrpDocumentTable init_columns = [ 'id', 'title', ]", "class TrpTranscriptDelete(DeleteView): model = TrpTranscript template_name = 'webpage/confirm_delete.html' success_url = reverse_lazy('archiv:trptranscript_browse') @method_decorator(login_required) def", "formhelper_class = TrpPageFilterFormHelper table_class = TrpPageTable init_columns = [ 'id', 'part_of', 'page_nr', ]", "= TrpCollection filter_class = TrpCollectionListFilter formhelper_class = TrpCollectionFilterFormHelper table_class = TrpCollectionTable init_columns =", "TrpCollectionCreate(BaseCreateView): model = TrpCollection form_class = TrpCollectionForm @method_decorator(login_required) def dispatch(self, *args, **kwargs): return", "= reverse_lazy('archiv:trpdocument_browse') @method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(TrpDocumentDelete, self).dispatch(*args, **kwargs) class TrpPageListView(GenericListView):", "'id', 'name', ] enable_merge = True class TrpCollectionDetailView(BaseDetailView): model = TrpCollection template_name =", "return super(TrpTranscriptUpdate, self).dispatch(*args, **kwargs) class TrpTranscriptDelete(DeleteView): model = TrpTranscript template_name = 'webpage/confirm_delete.html' success_url", "self).dispatch(*args, **kwargs) class TrpPageDelete(DeleteView): model = TrpPage template_name = 'webpage/confirm_delete.html' success_url = reverse_lazy('archiv:trppage_browse')", "class TrpCollectionUpdate(BaseUpdateView): model = TrpCollection form_class = TrpCollectionForm @method_decorator(login_required) def dispatch(self, *args, **kwargs):", "TrpDocument template_name = 'browsing/generic_detail.html' class TrpDocumentCreate(BaseCreateView): model = TrpDocument form_class = TrpDocumentForm @method_decorator(login_required)", "*args, **kwargs): return super(TrpCollectionUpdate, self).dispatch(*args, **kwargs) class TrpCollectionDelete(DeleteView): model = TrpCollection template_name =", "= TrpDocument template_name = 'webpage/confirm_delete.html' success_url = reverse_lazy('archiv:trpdocument_browse') @method_decorator(login_required) def dispatch(self, *args, **kwargs):", "**kwargs): return super(TrpPageCreate, self).dispatch(*args, **kwargs) class TrpPageUpdate(BaseUpdateView): model = TrpPage form_class = TrpPageForm", "= TrpPageListFilter formhelper_class = TrpPageFilterFormHelper table_class = TrpPageTable init_columns = [ 'id', 'part_of',", "TrpDocument filter_class = TrpDocumentListFilter formhelper_class = TrpDocumentFilterFormHelper table_class = TrpDocumentTable init_columns = [", "*args, **kwargs): return super(TrpDocumentCreate, self).dispatch(*args, **kwargs) class TrpDocumentUpdate(BaseUpdateView): model = TrpDocument form_class =", "= TrpPage filter_class = TrpPageListFilter formhelper_class = TrpPageFilterFormHelper table_class = TrpPageTable init_columns =", "'id', ] enable_merge = True class TrpTranscriptDetailView(BaseDetailView): model = TrpTranscript template_name = 'browsing/generic_detail.html'", "= TrpTranscriptFilterFormHelper table_class = TrpTranscriptTable init_columns = [ 'id', 'id', ] enable_merge =", "table_class = TrpCollectionTable init_columns = [ 'id', 'name', ] enable_merge = True class", "'page_nr', ] enable_merge = True class TrpPageDetailView(BaseDetailView): model = TrpPage template_name = 'browsing/generic_detail.html'", "= TrpCollectionForm @method_decorator(login_required) def dispatch(self, *args, 
**kwargs): return super(TrpCollectionUpdate, self).dispatch(*args, **kwargs) class TrpCollectionDelete(DeleteView):", "**kwargs) class TrpPageDelete(DeleteView): model = TrpPage template_name = 'webpage/confirm_delete.html' success_url = reverse_lazy('archiv:trppage_browse') @method_decorator(login_required)", "TrpCollectionForm @method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(TrpCollectionUpdate, self).dispatch(*args, **kwargs) class TrpCollectionDelete(DeleteView): model", "filter_class = TrpCollectionListFilter formhelper_class = TrpCollectionFilterFormHelper table_class = TrpCollectionTable init_columns = [ 'id',", "**kwargs): return super(TrpCollectionDelete, self).dispatch(*args, **kwargs) class TrpDocumentListView(GenericListView): model = TrpDocument filter_class = TrpDocumentListFilter", "model = TrpTranscript template_name = 'webpage/confirm_delete.html' success_url = reverse_lazy('archiv:trptranscript_browse') @method_decorator(login_required) def dispatch(self, *args,", "TrpDocument form_class = TrpDocumentForm @method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(TrpDocumentUpdate, self).dispatch(*args, **kwargs)", "import method_decorator from django.urls import reverse, reverse_lazy from django.views.generic.detail import DetailView from django.views.generic.edit", "from browsing.browsing_utils import ( GenericListView, BaseCreateView, BaseUpdateView, BaseDetailView ) class TrpCollectionListView(GenericListView): model =", "= TrpTranscriptForm @method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(TrpTranscriptCreate, self).dispatch(*args, **kwargs) class TrpTranscriptUpdate(BaseUpdateView):", "template_name = 'webpage/confirm_delete.html' success_url = reverse_lazy('archiv:trpcollection_browse') @method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(TrpCollectionDelete,", "form_class = TrpPageForm @method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(TrpPageUpdate, self).dispatch(*args, **kwargs) class", "'title', ] enable_merge = True class TrpDocumentDetailView(BaseDetailView): model = TrpDocument template_name = 'browsing/generic_detail.html'", "dispatch(self, *args, **kwargs): return super(TrpTranscriptCreate, self).dispatch(*args, **kwargs) class TrpTranscriptUpdate(BaseUpdateView): model = TrpTranscript form_class", "model = TrpTranscript filter_class = TrpTranscriptListFilter formhelper_class = TrpTranscriptFilterFormHelper table_class = TrpTranscriptTable init_columns", "model = TrpDocument template_name = 'browsing/generic_detail.html' class TrpDocumentCreate(BaseCreateView): model = TrpDocument form_class =", "TrpPageListView(GenericListView): model = TrpPage filter_class = TrpPageListFilter formhelper_class = TrpPageFilterFormHelper table_class = TrpPageTable", "form_class = TrpCollectionForm @method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(TrpCollectionCreate, self).dispatch(*args, **kwargs) class", "**kwargs): return super(TrpDocumentDelete, self).dispatch(*args, **kwargs) class TrpPageListView(GenericListView): model = TrpPage filter_class = TrpPageListFilter", "TrpTranscriptUpdate(BaseUpdateView): model = TrpTranscript form_class = TrpTranscriptForm @method_decorator(login_required) def dispatch(self, *args, **kwargs): return", "TrpTranscriptListFilter formhelper_class = TrpTranscriptFilterFormHelper table_class = TrpTranscriptTable init_columns = [ 'id', 'id', ]", 
"'browsing/generic_detail.html' class TrpTranscriptCreate(BaseCreateView): model = TrpTranscript form_class = TrpTranscriptForm @method_decorator(login_required) def dispatch(self, *args,", "return super(TrpCollectionDelete, self).dispatch(*args, **kwargs) class TrpDocumentListView(GenericListView): model = TrpDocument filter_class = TrpDocumentListFilter formhelper_class", "TrpDocumentForm @method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(TrpDocumentUpdate, self).dispatch(*args, **kwargs) class TrpDocumentDelete(DeleteView): model", "TrpPage form_class = TrpPageForm @method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(TrpPageUpdate, self).dispatch(*args, **kwargs)", "def dispatch(self, *args, **kwargs): return super(TrpCollectionCreate, self).dispatch(*args, **kwargs) class TrpCollectionUpdate(BaseUpdateView): model = TrpCollection", "**kwargs) class TrpPageListView(GenericListView): model = TrpPage filter_class = TrpPageListFilter formhelper_class = TrpPageFilterFormHelper table_class", "form_class = TrpDocumentForm @method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(TrpDocumentCreate, self).dispatch(*args, **kwargs) class", "@method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(TrpCollectionDelete, self).dispatch(*args, **kwargs) class TrpDocumentListView(GenericListView): model =", "TrpTranscriptDetailView(BaseDetailView): model = TrpTranscript template_name = 'browsing/generic_detail.html' class TrpTranscriptCreate(BaseCreateView): model = TrpTranscript form_class", "super(TrpDocumentUpdate, self).dispatch(*args, **kwargs) class TrpDocumentDelete(DeleteView): model = TrpDocument template_name = 'webpage/confirm_delete.html' success_url =", "dispatch(self, *args, **kwargs): return super(TrpCollectionUpdate, self).dispatch(*args, **kwargs) class TrpCollectionDelete(DeleteView): model = TrpCollection template_name", "TrpTranscriptCreate(BaseCreateView): model = TrpTranscript form_class = TrpTranscriptForm @method_decorator(login_required) def dispatch(self, *args, **kwargs): return", "TrpDocument, TrpPage, TrpTranscript ) from browsing.browsing_utils import ( GenericListView, BaseCreateView, BaseUpdateView, BaseDetailView )", "TrpPage filter_class = TrpPageListFilter formhelper_class = TrpPageFilterFormHelper table_class = TrpPageTable init_columns = [", "return super(TrpTranscriptCreate, self).dispatch(*args, **kwargs) class TrpTranscriptUpdate(BaseUpdateView): model = TrpTranscript form_class = TrpTranscriptForm @method_decorator(login_required)", "= [ 'id', 'part_of', 'page_nr', ] enable_merge = True class TrpPageDetailView(BaseDetailView): model =", "form_class = TrpCollectionForm @method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(TrpCollectionUpdate, self).dispatch(*args, **kwargs) class", "**kwargs) class TrpPageUpdate(BaseUpdateView): model = TrpPage form_class = TrpPageForm @method_decorator(login_required) def dispatch(self, *args,", "= TrpCollection form_class = TrpCollectionForm @method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(TrpCollectionCreate, self).dispatch(*args,", "class TrpDocumentDelete(DeleteView): model = TrpDocument template_name = 'webpage/confirm_delete.html' success_url = reverse_lazy('archiv:trpdocument_browse') @method_decorator(login_required) def", "= reverse_lazy('archiv:trppage_browse') @method_decorator(login_required) def dispatch(self, *args, **kwargs): return 
super(TrpPageDelete, self).dispatch(*args, **kwargs) class TrpTranscriptListView(GenericListView):", "def dispatch(self, *args, **kwargs): return super(TrpPageUpdate, self).dispatch(*args, **kwargs) class TrpPageDelete(DeleteView): model = TrpPage", "login_required from django.utils.decorators import method_decorator from django.urls import reverse, reverse_lazy from django.views.generic.detail import", "= TrpDocumentForm @method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(TrpDocumentUpdate, self).dispatch(*args, **kwargs) class TrpDocumentDelete(DeleteView):", "from . models import ( TrpCollection, TrpDocument, TrpPage, TrpTranscript ) from browsing.browsing_utils import", "TrpCollection form_class = TrpCollectionForm @method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(TrpCollectionUpdate, self).dispatch(*args, **kwargs)", "class TrpTranscriptUpdate(BaseUpdateView): model = TrpTranscript form_class = TrpTranscriptForm @method_decorator(login_required) def dispatch(self, *args, **kwargs):", "TrpDocumentTable init_columns = [ 'id', 'title', ] enable_merge = True class TrpDocumentDetailView(BaseDetailView): model", "= TrpTranscript template_name = 'browsing/generic_detail.html' class TrpTranscriptCreate(BaseCreateView): model = TrpTranscript form_class = TrpTranscriptForm", "init_columns = [ 'id', 'id', ] enable_merge = True class TrpTranscriptDetailView(BaseDetailView): model =" ]
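The delete views redirect to archiv:trpcollection_browse, archiv:trpdocument_browse, archiv:trppage_browse and archiv:trptranscript_browse. The urls.py that defines those names is not part of the dump, so the sketch below is only an assumption about how the browse routes could be wired; the route names come from the reverse_lazy() calls above, while the paths, module layout and app_name are illustrative.

# Hypothetical urls.py for the app; only the *_browse route names are taken
# from the reverse_lazy() targets above, everything else is illustrative.
from django.urls import path
from . import views

app_name = 'archiv'

urlpatterns = [
    path('collections/', views.TrpCollectionListView.as_view(), name='trpcollection_browse'),
    path('documents/', views.TrpDocumentListView.as_view(), name='trpdocument_browse'),
    path('pages/', views.TrpPageListView.as_view(), name='trppage_browse'),
    path('transcripts/', views.TrpTranscriptListView.as_view(), name='trptranscript_browse'),
]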
The next list of n-grams covers a protoc-generated module, command_pb2.py: the proto3 file command.proto in package commander defines a single message Cmd (an enum field cmd of type CmdType plus repeated ints, floats and strings fields) and the CmdType enum. Read together, the windows reproduce the whole generated file:

# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler.  DO NOT EDIT!
# source: command.proto
"""Generated protocol buffer code."""
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()


DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\rcommand.proto\x12\tcommander\"U\n\x03\x43md\x12\x1f\n\x03\x63md\x18\x01 \x01(\x0e\x32\x12.commander.CmdType\x12\x0c\n\x04ints\x18\x02 \x03(\x05\x12\x0e\n\x06\x66loats\x18\x03 \x03(\x02\x12\x0f\n\x07strings\x18\x04 \x03(\t*\xe3\x01\n\x07\x43mdType\x12\x0e\n\nRAWKEYDOWN\x10\x00\x12\x0c\n\x08RAWKEYUP\x10\x01\x12\x0c\n\x08\x43OMMAND2\x10\x02\x12\x0c\n\x08\x43OMMAND3\x10\x03\x12\x0c\n\x08\x43OMMAND4\x10\x04\x12\x0c\n\x08\x43OMMAND5\x10\x05\x12\x0c\n\x08\x43OMMAND6\x10\x06\x12\x0c\n\x08\x43OMMAND7\x10\x07\x12\x0c\n\x08\x43OMMAND8\x10\x08\x12\x0c\n\x08\x43OMMAND9\x10\t\x12\x0f\n\x0bMAPORIGINAL\x10\n\x12\x07\n\x03MAP\x10\x0b\x12\x07\n\x03\x41\x43K\x10\x0c\x12\x08\n\x04\x41\x43K2\x10\r\x12\x08\n\x04HERO\x10\x0e\x12\t\n\x05READY\x10\x0f\x12\x08\n\x04INIT\x10\x10\x62\x06proto3')

_CMDTYPE = DESCRIPTOR.enum_types_by_name['CmdType']
CmdType = enum_type_wrapper.EnumTypeWrapper(_CMDTYPE)
RAWKEYDOWN = 0
RAWKEYUP = 1
COMMAND2 = 2
COMMAND3 = 3
COMMAND4 = 4
COMMAND5 = 5
COMMAND6 = 6
COMMAND7 = 7
COMMAND8 = 8
COMMAND9 = 9
MAPORIGINAL = 10
MAP = 11
ACK = 12
ACK2 = 13
HERO = 14
READY = 15
INIT = 16

_CMD = DESCRIPTOR.message_types_by_name['Cmd']
Cmd = _reflection.GeneratedProtocolMessageType('Cmd', (_message.Message,), {
  'DESCRIPTOR' : _CMD,
  '__module__' : 'command_pb2'
  # @@protoc_insertion_point(class_scope:commander.Cmd)
  })
_sym_db.RegisterMessage(Cmd)

if _descriptor._USE_C_DESCRIPTORS == False:
  DESCRIPTOR._options = None
  _CMDTYPE._serialized_start=116
  _CMDTYPE._serialized_end=343
  _CMD._serialized_start=28
  _CMD._serialized_end=113
# @@protoc_insertion_point(module_scope)
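As a usage note, a minimal round-trip sketch with the generated classes follows; it assumes the module above is importable as command_pb2, and the field values are purely illustrative.

import command_pb2

msg = command_pb2.Cmd()
msg.cmd = command_pb2.READY         # CmdType values are also exported as module-level constants
msg.ints.extend([1, 2, 3])          # repeated int32 field
msg.floats.append(0.25)             # repeated float field
msg.strings.append('example')       # repeated string field

wire = msg.SerializeToString()      # serialize to the protobuf wire format
decoded = command_pb2.Cmd.FromString(wire)
assert decoded.cmd == command_pb2.CmdType.Value('READY')
assert list(decoded.ints) == [1, 2, 3]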
[ "Returns ------- `pandas.DataFrame` (51, (8,13)) (log_age, (event_types, metallicity) A pandas MultiIndex dataframe containing", "# ##################################### def rates_all_z(data_path, imf, binary=True): \"\"\" Loads the BPASS supernova event files.", "\"\"\" Loads a BPASS output file Parameters ---------- path : str Path to", "can put them in a dataframe # with some named columns and set", "time this function is ran on a folder it will generate a pickle", ": str Path to the file containing the target data. hr_type : str,", "'rb')) # TODO: Deprecation warning def set_models_path(path): \"\"\" Changes the path to the", "'CIII1907_F', 'CIII1907_EW', 'CIII1910_F', 'CIII1910_EW', 'CIV1548_F', 'CIV1548_EW', 'CIV1551_F', 'CIV1551_EW', 'OI1357_F', 'OI1357_EW', 'OIII1661_F', 'OIII1661_EW', 'OIII1666_F',", "binary else \"sin\" # check IMF key if imf not in BPASS_IMFS: raise", "updated by looking at this file:' '\\n'+path_to_settings) ######################## # LOAD DUMMY VARIABLE #", "os.path.isfile(path), 'File not found.' return pickle.load(open(path, 'rb')) # TODO: Deprecation warning def set_models_path(path):", "name row = [1, 0, 0, 0] mixedimf.append(0.0) mixedage.append(0.0) initialBH.append(np.nan) initialP.append(np.nan) continue elif", "return _colours(path) elif \"hrs\" in path and hr_type == 'TL': return _hrTL(path) elif", "'stellar_mass', 'remnant_mass'], engine='python') def _hrTL(path): \"\"\" Load HR diagrams (TL type) \"\"\" #", "Ia | IIP | ... | PISNe | low_mass | |Metallicity| 0.00001 |", "folder it will generate an npy file containing all the BPASS emissivities for", "the value of type # To know what each value means, consult the", "'f300w', 'f336w', 'f435w', 'f450w', 'f555w', 'f606w', 'f814w', 'prod_rate', 'halpha', 'FUV', 'NUV']) ########################## #", "The first time this function is run on a folder it will generate", "filename\"\"\" inv_dict ={v: k for k, v in dummy_dicts[bpass_version].items()} cols = [inv_dict[key] if", "the file in the same folder with the name: `all_spectra-[bin/sin]-[imf].pkl` The spectra are", "need the next line for more inputs # we set the vector to", "not a BPASS IMF. Please select a correct IMF.\\n\"\\ \"These can be found", "the next line. continue # This line contains the imf probability and the", "# Check population type star = \"bin\" if binary else \"sin\" # check", ": `str` The filepath to the folder containing the BPASS data binary :", "set the datatypes to strings and numbers. input_df = pd.DataFrame.from_dict({'filenames': filenames[:-1], 'model_imf': modelimfs,", "df from a filename\"\"\" inv_dict ={v: k for k, v in dummy_dicts[bpass_version].items()} cols", "hr from hoki.constants import * import os import yaml import io import pickle", "| IIP | ... | PISNe | low_mass | |Metallicity| 0.00001 | 0.00001", "but we also need to read the next line so we set the", "reset the vector to be reading in filenames because no more inputs are", "looking at this file:' '\\n'+path_to_settings) ######################## # LOAD DUMMY VARIABLE # ######################## def", "folder containing the BPASS data binary : `bool` Use the binary files or", "import hoki.hrdiagrams as hr from hoki.constants import * import os import yaml import", "# Then depending on whether we need the next line for more inputs", "by looking at this file:' '\\n'+path_to_settings) ######################## # LOAD DUMMY VARIABLE # ########################", "them in their rightful lists. 
elements = l.split() initialBH.append(elements[0]) initialP.append(elements[0]) # We then", "'M_lL', 'WNH_lL', 'WN_lL', 'WC_lL'], engine='python') def _yields(path): \"\"\" Load One yields file into", "load the data into a numpy arrays as well or is the #", "the BPASS data binary : `bool` Use the binary files or just the", "'\\n'+path_to_settings) ######################## # LOAD DUMMY VARIABLE # ######################## def dummy_to_dataframe(filename, bpass_version=DEFAULT_BPASS_VERSION): \"\"\"Reads in", "hoki's settings Parameters ---------- path : str, Absolute path to the top level", "reading in filenames because no more inputs are coming row = [1, 0,", "path is incorrect.\" if 'UV' in path: return _UV_nebular_lines(path) elif 'Optical' in path:", "default_flow_style=False, allow_unicode=True) print('Looks like everything went well! You can check the path was", "== 'TTG': return _hrTTG(path) else: print(\"HOKI ERROR -- Could not load the Stellar", "was correctly updated by looking at this file:' '\\n'+path_to_settings) ######################## # LOAD DUMMY", "hoki.data_compilers.SpectraCompiler( data_path, data_path, imf, binary=binary ) spectra = spec.output return spectra def emissivities_all_z(data_path,", "Default=True imf : `str` BPASS Identifier of the IMF to be used. The", "elements = l.split() initialBH.append(elements[0]) initialP.append(elements[0]) # We then reset the vector to be", "0, 0] mixedimf.append(0.0) mixedage.append(0.0) initialBH.append(np.nan) initialP.append(np.nan) continue elif int(elements[1]) != 4: # If", "'8.1', '8.2', '8.3', '8.4', '8.5', '8.6', '8.7', '8.8', '8.9', '9.0', '9.1', '9.2', '9.3',", "else 'Nan'+str(key) for key in range(96)] dummy = pd.read_csv(filename, names=cols, sep=r\"\\s+\", engine='python') return", "imf not in BPASS_IMFS: raise HokiKeyError( f\"{imf} is not a BPASS IMF. Please", "= pd.read_csv(filename, names=cols, sep=r\"\\s+\", engine='python') return dummy ######################### # MODEL INPUT FUNCTIONS #", "in filenames because no more inputs are coming row = [1, 0, 0,", "pandas MultiIndex dataframe containing the BPASS number of events per metallicity per type.", "'log_age', 'NII6548_F', 'NII6548_EW', 'NII6584_F', 'NII6584_EW', 'SiII6716_F', 'SiII6716_EW', 'SiII6731_F', 'SiII6731_EW', 'OI6300_F', 'OI6300_EW', 'OIII4959_F','OIII4959_EW','OIII5007_F','OIII5007_EW', 'Halpha_F',", "row = [0, 0, 1, 1] continue elif row[2]: # Splitting the two", "| log_age |---------|----------|------|-------|----------| | 6.0 | | ... | Event Rate values |", "and set the datatypes to strings and numbers. input_df = pd.DataFrame.from_dict({'filenames': filenames[:-1], 'model_imf':", "HokiTypeError(\"The folder location is expected to be a string.\") # Check if compiled", "\"bin\" if binary else \"sin\" # Check if the given IMF is in", "files. Notes ----- The first time this function is ran on a folder", "emissivities ################# # # ################# def _do_not_use(): import webbrowser url = \"https://www.youtube.com/watch?v=dQw4w9WgXcQ\" webbrowser.open_new_tab(url)", "file containing the input data. Returns ------- \"\"\" assert isinstance(path, str), \"The location", "to the file containing the target data. 
hr_type : str, optional Type of", "generate a pickle file containing all the BPASS spectra per metallicity for faster", "incorrect.\" lines = open(path).read().split(\"\\n\") # rows [a,b,c,d] in the BPASS manual row =", "elif int(elements[1]) != 4: # If type is 2 or 3, we know", "print(\"Load precompiled file.\") emissivities = np.load(f\"{data_path}/all_ionizing-{star}-{imf}.npy\") print(\"Done Loading.\") # Compile the spectra for", "path: return _ionizing_flux(path) elif \"colour\" in path: return _colours(path) elif \"hrs\" in path", "Type | Ia | IIP | ... | PISNe | low_mass | |Metallicity|", "else \"sin\" # Check if the given IMF is in the accepted IMFs", "array containing all the BPASS emissivities (Nion [1/s], L_Halpha [ergs/s], L_FUV [ergs/s/A], L_NUV", "compiled spectra are already present in data folder if os.path.isfile(f\"{data_path}/all_ionizing-{star}-{imf}.npy\"): print(\"Load precompiled file.\")", "data_path, imf, binary=binary ) emissivities = res.output return emissivities ################# # # #################", "MODEL OUTPUT FUNCTIONS # ########################## def model_output(path, hr_type=None): \"\"\" Loads a BPASS output", "- `\"imf_chab300\"` - `\"imf100_100\"` - `\"imf100_300\"` - `\"imf135_100\"` - `\"imf135_300\"` - `\"imfall_300\"` -", "\"\"\" return pd.read_csv(path, sep=r'\\s+', engine='python', names=['log_age', 'V-I', 'U', 'B', 'V', 'R', 'I', 'J',", "and filled our lists, we can put them in a dataframe # with", "file is expected to be a string.\" assert os.path.isfile(path), \"HOKI: ERROR This file", "will append a NaN filenames = [] modelimfs = [] modeltypes = []", "'OI6300_EW', 'OIII4959_F','OIII4959_EW','OIII5007_F','OIII5007_EW', 'Halpha_F', 'Halpha_EW', 'Hbeta_F', 'Hbeta_EW', 'HeI4686_F', 'HeI4686_EW'] return pd.read_csv(path, skiprows=1, sep=r'\\s+', engine='python',", "'10.7', '10.8', '10.9', '11.0']) def _ionizing_flux(path): \"\"\" Load One ionizing flux file \"\"\"", "# NEBULAR EMISSION LINES # ########################## def nebular_lines(path): \"\"\" Load the nebular line", "\"starmass\" in path: return _stellar_masses(path) elif \"spectra\" in path: return _sed(path) elif \"ioniz\"", "str, optional Type of HR diagram to load: 'TL', 'Tg' or 'TTG'. Returns", "yaml import io import pickle import pkg_resources import hoki.data_compilers import warnings from hoki.utils.exceptions", "path : str Path to the file containing the input data. Returns -------", "mixedage.append(0.0) initialBH.append(np.nan) initialP.append(np.nan) continue elif int(elements[1]) != 4: # If type is 2", "pickle file containing all the BPASS spectra per metallicity for faster loading in", "'10.5', '10.6', '10.7', '10.8', '10.9', '11.0']) def _ionizing_flux(path): \"\"\" Load One ionizing flux", "in path: return _stellar_masses(path) elif \"spectra\" in path: return _sed(path) elif \"ioniz\" in", "wavelength)] A 3D numpy array containing all the BPASS spectra for a specific", "def _hrTL(path): \"\"\" Load HR diagrams (TL type) \"\"\" # 'a' is just", "read from file and not normalised. Input ----- data_path : `str` The path", "this could be a directory named something like bpass-v2.2-newmodels and the next level", "_hrTTG(path) else: print(\"HOKI ERROR -- Could not load the Stellar Population output. 
\"", "(8,13)) (log_age, (event_types, metallicity) A pandas MultiIndex dataframe containing the BPASS number of", "Check if compiled spectra are already present in data folder if os.path.isfile(f\"{data_path}/all_ionizing-{star}-{imf}.npy\"): print(\"Load", "BPASS spectra from files. Notes ----- The first time this function is ran", "if binary else \"sin\" # Check if the given IMF is in the", "outputs, so we need the next 2 lines. # We set the vector", "- `\"imf170_100\"` - `\"imf170_300\"` Returns ------- spectra : `numpy.ndarray` (13, 51, 100000) [(metallicity,", "########################## def model_output(path, hr_type=None): \"\"\" Loads a BPASS output file Parameters ---------- path", "new file name row = [1, 0, 0, 0] mixedimf.append(0.0) mixedage.append(0.0) initialBH.append(np.nan) initialP.append(np.nan)", "'7.1', '7.2', '7.3', '7.4', '7.5', '7.6', '7.7', '7.8', '7.9', '8.0', '8.1', '8.2', '8.3',", "'NUV']) ########################## # NEBULAR EMISSION LINES # ########################## def nebular_lines(path): \"\"\" Load the", "HokiDeprecationWarning, HokiKeyError # TODO: Should I allow people to chose to load the", "print(\"Loading precompiled file.\") spectra = np.load(f\"{data_path}/all_spectra-{star}-{imf}.npy\") print(\"Done Loading.\") # Otherwise compile else: print(\"Compiled", "Load all BPASS spectra from files. Notes ----- The first time this function", "otherwise else: print(\"Compiled file not found. Data will be compiled.\") res = hoki.data_compilers.EmissivityCompiler(", "BPASS manual row = [1, 0, 0, 0] # All potential input parameters", "----- data_path : `str` The path to the folder containing the BPASS spectra.", "log_age as a float. The column is a `pandas.MultiIndex` with the event types", "'SiII1531_F', 'SiII1531_EW'] return pd.read_csv(path, skiprows=1, sep=r'\\s+', engine='python', names=column_UV_em_lines) ##################################### # BPASS Load over", "the filename correct?\" \"\\n2) Trying to load an HR diagram? \" \"Make sure", "999 Angstrom) \"\"\" # Check population type star = \"bin\" if binary else", "f\"{imf} is not a BPASS IMF. Please select a correct IMF.\") # check", "Data will be compiled\") spec = hoki.data_compilers.SpectraCompiler( data_path, data_path, imf, binary=binary ) spectra", "binary=True): \"\"\" Load all BPASS spectra from files. Notes ----- The first time", "to chose to load the data into a numpy arrays as well or", "\"\"\" Load One SED file \"\"\" return pd.read_csv(path, sep=r\"\\s+\", engine='python', names=['WL', '6.0', '6.1',", "line. continue # This line contains the imf probability and the type elif", "_hrTL(path): \"\"\" Load HR diagrams (TL type) \"\"\" # 'a' is just a", "Type\"]) rates = pd.DataFrame(index=BPASS_TIME_BINS, columns=columns, dtype=np.float64) rates.index.name = \"log_age\" # load supernova count", "have to reload hoki for your new path to take effect. \"\"\" deprecation_msg", "a[10200:15300,:].reshape(51,100,100), hr_type='TL') def _hrTg(path): \"\"\" Load One HR diagrams (Tg type) \"\"\" a", "exist, or its path is incorrect.\" if 'UV' in path: return _UV_nebular_lines(path) elif", "\"Available options are: 'TL', 'Tg', 'TTG'. \" if \"supernova\" in path: return _sn_rates(path)", "step is decided according to the value of type # To know what", "path: return _stellar_masses(path) elif \"spectra\" in path: return _sed(path) elif \"ioniz\" in path:", "# Check if compiled spectra are already present in data folder if os.path.isfile(f\"{data_path}/all_ionizing-{star}-{imf}.npy\"):", "set! 
Available options are: 'TL', 'Tg', 'TTG'. \") def _sn_rates(path): \"\"\" Loads One", "at this file:' '\\n'+path_to_settings) ######################## # LOAD DUMMY VARIABLE # ######################## def dummy_to_dataframe(filename,", "input data. Returns ------- \"\"\" assert isinstance(path, str), \"The location of the file", "stellar models this could be a directory named something like bpass-v2.2-newmodels and the", "and hr_type == 'TL': return _hrTL(path) elif \"hrs\" in path and hr_type ==", "Event Rate values | 11.0 | \"\"\" # Check population type star =", "array containing all the BPASS spectra for a specific imf and binary or", "str Path to the file containing the input data. Returns ------- \"\"\" assert", "inv_dict ={v: k for k, v in dummy_dicts[bpass_version].items()} cols = [inv_dict[key] if key", "its path is incorrect.\" assert hr_type in [None,'TL', 'Tg', 'TTG'], \"HOKI ERROR: The", "'Ic', 'LGRB', 'PISNe', 'low_mass', 'e_Ia', 'e_IIP', 'e_II', 'e_Ib', 'e_Ic', 'e_LGRB', 'e_PISNe', 'e_low_mass', 'age_yrs'],", "Use the binary files or just the single stars. Default=True imf : `str`", "containing all the BPASS emissivities (Nion [1/s], L_Halpha [ergs/s], L_FUV [ergs/s/A], L_NUV [ergs/s/A])", "range(96)] dummy = pd.read_csv(filename, names=cols, sep=r\"\\s+\", engine='python') return dummy ######################### # MODEL INPUT", "'unpickle'] data_path = pkg_resources.resource_filename('hoki', 'data') ######################## # GENERAL LOAD HELPERS # ######################## def", "'SiII6731_EW', 'OI6300_F', 'OI6300_EW', 'OIII4959_F','OIII4959_EW','OIII5007_F','OIII5007_EW', 'Halpha_F', 'Halpha_EW', 'Hbeta_F', 'Hbeta_EW', 'HeI4686_F', 'HeI4686_EW'] return pd.read_csv(path, skiprows=1,", "-- Could not load the Stellar Population output. \" \"\\nDEBUGGING ASSISTANT:\\n1) Is the", "imf and binary or single star population. Usage: spectra[1][2][1000] (gives L_\\\\odot for Z=0.0001", "values and putting them in their respective lists elements = l.split() mixedimf.append(elements[0]) mixedage.append(elements[1])", "is not a BPASS IMF. Please select a correct IMF.\\n\"\\ \"These can be", "moved to the hoki.constants module -- In future versions of hoki\" \\ \"calling", "'Optical' in path: return _optical_nebular_lines(path) def _optical_nebular_lines(path): column_opt_em_lines=['model_num', 'logU', 'log_nH', 'log_age', 'NII6548_F', 'NII6548_EW',", "import hoki.data_compilers import warnings from hoki.utils.exceptions import HokiDeprecationWarning, HokiKeyError # TODO: Should I", "\"These can be found in the documentation of this function.\") # Create the", "their rightful lists. elements = l.split() initialBH.append(elements[0]) initialP.append(elements[0]) # We then reset the", "engine='python', names=['WL', '6.0', '6.1', '6.2', '6.3', '6.4', '6.5', '6.6', '6.7', '6.8', '6.9', '7.0',", "`\"imf170_300\"` Returns ------- `pandas.DataFrame` (51, (8,13)) (log_age, (event_types, metallicity) A pandas MultiIndex dataframe", "pd.DataFrame(index=BPASS_TIME_BINS, columns=columns, dtype=np.float64) rates.index.name = \"log_age\" # load supernova count files for num,", "\"\"\" Load all BPASS emissivities from files. Notes ----- The first time this", ": `str` BPASS Identifier of the IMF to be used. The accepted IMF", "hoki for your new path to take effect. \"\"\" deprecation_msg = \"set_models_path has", "to read the next line so we set the vector accordingly. 
row =", "'OI6300_F', 'OI6300_EW', 'OIII4959_F','OIII4959_EW','OIII5007_F','OIII5007_EW', 'Halpha_F', 'Halpha_EW', 'Hbeta_F', 'Hbeta_EW', 'HeI4686_F', 'HeI4686_EW'] return pd.read_csv(path, skiprows=1, sep=r'\\s+',", "is run on a folder it will generate an npy file containing all", "It stores the file in the same folder with the name: `all_ionizing-[bin/sin]-[imf].npy`. The", "(\"Ia\", 0.02)] Notes ----- This dataframe has the following structure. The index is", "'6.3', '6.4', '6.5', '6.6', '6.7', '6.8', '6.9', '7.0', '7.1', '7.2', '7.3', '7.4', '7.5',", "| 6.0 | | ... | Event Rate values | 11.0 | \"\"\"", "'He_wind', 'Z_wind', 'E_wind', 'E_sn', 'H_sn', 'He_sn', 'Z_sn'], engine='python') def _stellar_masses(path): \"\"\" Load One", "data_path+'/settings.yaml' with open(path_to_settings, 'r') as stream: settings = yaml.safe_load(stream) settings['models_path'] = path with", "'10.6', '10.7', '10.8', '10.9', '11.0']) def _ionizing_flux(path): \"\"\" Load One ionizing flux file", "incorrect.\" if 'UV' in path: return _UV_nebular_lines(path) elif 'Optical' in path: return _optical_nebular_lines(path)", "function is run on a folder it will generate an npy file containing", "star population. Usage: spectra[1][2][0] (gives Nion for Z=0.0001 and log_age=6.2) \"\"\" # Check", "The location of the file is expected to be a string.\" assert os.path.isfile(path),", "path with io.open(path_to_settings, 'w', encoding='utf8') as outfile: yaml.dump(settings, outfile, default_flow_style=False, allow_unicode=True) print('Looks like", "import * import os import yaml import io import pickle import pkg_resources import", "in the same folder with the name: `all_ionizing-[bin/sin]-[imf].npy`. The emissivities are just read", "the whole file and filled our lists, we can put them in a", "'M_hL', 'WNH_hL', 'WN_hL', 'WC_hL', 'O_lL', 'Of_lL', 'B_lL', 'A_lL', 'YSG_lL', 'K_lL', 'M_lL', 'WNH_lL', 'WN_lL',", "binary else \"sin\" # Check if the given IMF is in the accepted", "file not found. Data will be compiled.\") res = hoki.data_compilers.EmissivityCompiler( data_path, data_path, imf,", "Path to the file containing the target data. hr_type : str, optional Type", "present in data folder if os.path.isfile(f\"{data_path}/all_ionizing-{star}-{imf}.npy\"): print(\"Load precompiled file.\") emissivities = np.load(f\"{data_path}/all_ionizing-{star}-{imf}.npy\") print(\"Done", "Absolute path to the top level of the stellar models this could be", "Example: rates.loc[6.5, (\"Ia\", 0.02)] Notes ----- This dataframe has the following structure. The", "type star = \"bin\" if binary else \"sin\" # Check if the given", "\"\"\" Load One HR diagrams (T/TG type) \"\"\" a = np.loadtxt(path) return hr.HRDiagram(a[30600:35700,:].reshape(51,100,100),", "'7.6', '7.7', '7.8', '7.9', '8.0', '8.1', '8.2', '8.3', '8.4', '8.5', '8.6', '8.7', '8.8',", "file in the same folder with the name: `all_spectra-[bin/sin]-[imf].pkl` The spectra are just", "Input ----- data_path : `str` The path to the folder containing the BPASS", "files for num, metallicity in enumerate(BPASS_METALLICITIES): data = model_output( f\"{data_path}/supernova-{star}-{imf}.{metallicity}.dat\" ) data =", "\"\"\" Load One stellar type number file into a dataframe \"\"\" return pd.read_csv(path,", "file in the same folder with the name: `all_ionizing-[bin/sin]-[imf].npy`. 
The emissivities are just", "HokiDeprecationWarning) assert os.path.isdir(path), 'HOKI ERROR: The path provided does not correspond to a", "'H_wind', 'He_wind', 'Z_wind', 'E_wind', 'E_sn', 'H_sn', 'He_sn', 'Z_sn'], engine='python') def _stellar_masses(path): \"\"\" Load", "# The next line will contain the imf probability and the type -", "probability and the type - we reset the vector.. row = [0, 1,", "[1, 0, 0, 0] if row[3]: row[2] = 0 continue elif row[3]: #", "_stellar_masses(path) elif \"spectra\" in path: return _sed(path) elif \"ioniz\" in path: return _ionizing_flux(path)", "the vector accordingly. row = [0, 0, 1, 0] initialBH.append(np.nan) initialP.append(np.nan) continue elif", "0, 0, 0] if row[3]: row[2] = 0 continue elif row[3]: # This", "'10.3', '10.4', '10.5', '10.6', '10.7', '10.8', '10.9', '11.0']) def _ionizing_flux(path): \"\"\" Load One", "frame good enough? __all__ = ['model_input', 'model_output', 'set_models_path', 'unpickle'] data_path = pkg_resources.resource_filename('hoki', 'data')", "res = hoki.data_compilers.EmissivityCompiler( data_path, data_path, imf, binary=binary ) emissivities = res.output return emissivities", "numpy array containing all the BPASS spectra for a specific imf and binary", "colour file \"\"\" return pd.read_csv(path, sep=r'\\s+', engine='python', names=['log_age', 'V-I', 'U', 'B', 'V', 'R',", "spectra = np.load(f\"{data_path}/all_spectra-{star}-{imf}.npy\") print(\"Done Loading.\") # Otherwise compile else: print(\"Compiled file not found.", "IMF identifiers are: - `\"imf_chab100\"` - `\"imf_chab300\"` - `\"imf100_100\"` - `\"imf100_300\"` - `\"imf135_100\"`", "has the following structure. The index is the log_age as a float. The", "file and put them in a dataframe Parameters ---------- path : str Path", "data binary : `bool` Use the binary files or just the single stars.", "a = np.loadtxt(path) return hr.HRDiagram(a[15300:20400,:].reshape(51,100,100), a[20400:25500,:].reshape(51,100,100), a[25500:30600,:].reshape(51,100,100), hr_type='Tg') def _hrTTG(path): \"\"\" Load One", "lists elements = l.split() mixedimf.append(elements[0]) mixedage.append(elements[1]) # Then depending on whether we need", "'10.2', '10.3', '10.4', '10.5', '10.6', '10.7', '10.8', '10.9', '11.0']) def _ionizing_flux(path): \"\"\" Load", "population type star = \"bin\" if binary else \"sin\" # Check if the", "pd.read_csv(path, sep=r\"\\s+\", names=['log_age', 'O_hL', 'Of_hL', 'B_hL', 'A_hL', 'YSG_hL', 'K_hL', 'M_hL', 'WNH_hL', 'WN_hL', 'WC_hL',", "In future versions of hoki\" \\ \"calling set_models_path from hoki.load will fail\" warnings.warn(deprecation_msg,", "Load One HR diagrams (T/TG type) \"\"\" a = np.loadtxt(path) return hr.HRDiagram(a[30600:35700,:].reshape(51,100,100), a[35700:40800,:].reshape(51,100,100),", "manual row = [1, 0, 0, 0] # All potential input parameters and", "(51, (8,13)) (log_age, (event_types, metallicity) A pandas MultiIndex dataframe containing the BPASS number", "ERROR: The path provided does not correspond to a valid directory' path_to_settings =", "Supernova rate file into a dataframe \"\"\" return pd.read_csv(path, sep=r\"\\s+\", names=['log_age', 'Ia', 'IIP',", "on a folder it will generate an npy file containing all the BPASS", "or is the # data frame good enough? 
__all__ = ['model_input', 'model_output', 'set_models_path',", "4) [(metallicity, log_age, band)] A 3D numpy array containing all the BPASS emissivities", "already present in data folder if os.path.isfile(f\"{data_path}/all_ionizing-{star}-{imf}.npy\"): print(\"Load precompiled file.\") emissivities = np.load(f\"{data_path}/all_ionizing-{star}-{imf}.npy\")", "future. It stores the file in the same folder with the name: `all_ionizing-[bin/sin]-[imf].npy`.", "is the log_age as a float. The column is a `pandas.MultiIndex` with the", "the other potential inputs are NaNs and we go back to the #", "| Event Rate values | 11.0 | \"\"\" # Check population type star", "IMF to be used. The accepted IMF identifiers are: - `\"imf_chab100\"` - `\"imf_chab300\"`", "= model_output( f\"{data_path}/supernova-{star}-{imf}.{metallicity}.dat\" ) data = data.loc[:, slice(BPASS_EVENT_TYPES[0], BPASS_EVENT_TYPES[-1])] rates.loc[:, (BPASS_NUM_METALLICITIES[num], slice(None))] =", "the BPASS spectra. binary : `bool` Use the binary files or just the", "use what we should do with this line. for l in lines[1:]: #", "spectra. binary : `bool` Use the binary files or just the single stars.", "'8.3', '8.4', '8.5', '8.6', '8.7', '8.8', '8.9', '9.0', '9.1', '9.2', '9.3', '9.4', '9.5',", "Load One stellar type number file into a dataframe \"\"\" return pd.read_csv(path, sep=r\"\\s+\",", "return pd.read_csv(path, sep=r\"\\s+\", engine='python', names=['WL', '6.0', '6.1', '6.2', '6.3', '6.4', '6.5', '6.6', '6.7',", "as outfile: yaml.dump(settings, outfile, default_flow_style=False, allow_unicode=True) print('Looks like everything went well! You can", "contain the imf probability and the type - we reset the vector.. row", "'PISNe', 'low_mass', 'e_Ia', 'e_IIP', 'e_II', 'e_Ib', 'e_Ic', 'e_LGRB', 'e_PISNe', 'e_low_mass', 'age_yrs'], engine='python') def", "information Parameters ---------- path Returns ------- \"\"\" assert isinstance(path, str), \"HOKI ERROR: The", "\"\"\" Load One ionizing flux file \"\"\" return pd.read_csv(path, sep=r'\\s+', engine='python', names=['log_age', 'prod_rate',", "# Create the output DataFrame arrays = [BPASS_NUM_METALLICITIES, BPASS_EVENT_TYPES] columns = pd.MultiIndex.from_product( arrays,", "and binary or single star population. Usage: spectra[1][2][0] (gives Nion for Z=0.0001 and", "io.open(path_to_settings, 'w', encoding='utf8') as outfile: yaml.dump(settings, outfile, default_flow_style=False, allow_unicode=True) print('Looks like everything went", "... and we skip the rest to read in the next line. continue", "`all_ionizing-[bin/sin]-[imf].npy`. The emissivities are just read from file and not normalised. Input -----", "== 4: # If the type is 4 we need all the other", "# swap metallicity and event type return rates.swaplevel(0, 1, axis=1) def spectra_all_z(data_path, imf,", "|Metallicity| 0.00001 | 0.00001 | ... | 0.04 | 0.04 | | log_age", "'K_hL', 'M_hL', 'WNH_hL', 'WN_hL', 'WC_hL', 'O_lL', 'Of_lL', 'B_lL', 'A_lL', 'YSG_lL', 'K_lL', 'M_lL', 'WNH_lL',", "in dummy_dicts[bpass_version].items()} cols = [inv_dict[key] if key in inv_dict.keys() else 'Nan'+str(key) for key", "assert os.path.isfile(path), 'File not found.' 
return pickle.load(open(path, 'rb')) # TODO: Deprecation warning def", "# Once we've goe through the whole file and filled our lists, we", "'CIII1910_EW', 'CIV1548_F', 'CIV1548_EW', 'CIV1551_F', 'CIV1551_EW', 'OI1357_F', 'OI1357_EW', 'OIII1661_F', 'OIII1661_EW', 'OIII1666_F', 'OIII1666_EW', 'SiII1263_F', 'SiII1263_EW',", "Parameters ---------- path : str Path to the file containing the target data.", "effect. \"\"\" deprecation_msg = \"set_models_path has been moved to the hoki.constants module --", "depending on whether we need the next line for more inputs # we", "warnings.warn(deprecation_msg, HokiDeprecationWarning) assert os.path.isdir(path), 'HOKI ERROR: The path provided does not correspond to", "for l in lines[1:]: # This line contains the filename. if row[0]: filenames.append(l)", "for key in range(96)] dummy = pd.read_csv(filename, names=cols, sep=r\"\\s+\", engine='python') return dummy #########################", "skiprows=1, sep=r'\\s+', engine='python', names=column_opt_em_lines) def _UV_nebular_lines(path): column_UV_em_lines = ['model_num', 'logU', 'log_nH', 'log_age', 'HeII1640_F',", "----- The first time this function is run on a folder it will", "of the IMF to be used. The accepted IMF identifiers are: - `\"imf_chab100\"`", "we've goe through the whole file and filled our lists, we can put", "np.load(f\"{data_path}/all_ionizing-{star}-{imf}.npy\") print(\"Done Loading.\") # Compile the spectra for faster reading next time otherwise", "`float`) and the metallicity (level=1, `float`) |Event Type | Ia | IIP |", "check IMF key if imf not in BPASS_IMFS: raise HokiKeyError( f\"{imf} is not", "contains the imf probability and the type elif row[1]: elements = l.split() #", "'e_Ic', 'e_LGRB', 'e_PISNe', 'e_low_mass', 'age_yrs'], engine='python') def _stellar_numbers(path): \"\"\" Load One stellar type", "print(\"HOKI ERROR -- Could not load the Stellar Population output. \" \"\\nDEBUGGING ASSISTANT:\\n1)", "values | 11.0 | \"\"\" # Check population type star = \"bin\" if", "= path with io.open(path_to_settings, 'w', encoding='utf8') as outfile: yaml.dump(settings, outfile, default_flow_style=False, allow_unicode=True) print('Looks", "'NII6548_F', 'NII6548_EW', 'NII6584_F', 'NII6584_EW', 'SiII6716_F', 'SiII6716_EW', 'SiII6731_F', 'SiII6731_EW', 'OI6300_F', 'OI6300_EW', 'OIII4959_F','OIII4959_EW','OIII5007_F','OIII5007_EW', 'Halpha_F', 'Halpha_EW',", "path to the top level of the stellar models this could be a", "axis=1) def spectra_all_z(data_path, imf, binary=True): \"\"\" Load all BPASS spectra from files. Notes", "'log_nH', 'log_age', 'NII6548_F', 'NII6548_EW', 'NII6584_F', 'NII6584_EW', 'SiII6716_F', 'SiII6716_EW', 'SiII6731_F', 'SiII6731_EW', 'OI6300_F', 'OI6300_EW', 'OIII4959_F','OIII4959_EW','OIII5007_F','OIII5007_EW',", "engine='python') def _hrTL(path): \"\"\" Load HR diagrams (TL type) \"\"\" # 'a' is", "names=['log_age', 'stellar_mass', 'remnant_mass'], engine='python') def _hrTL(path): \"\"\" Load HR diagrams (TL type) \"\"\"", "hoki.constants module -- In future versions of hoki\" \\ \"calling set_models_path from hoki.load", "expected to be a string.\") # Check if compiled spectra are already present", "BPASS spectra. binary : `bool` Use the binary files or just the single", "########################## # MODEL OUTPUT FUNCTIONS # ########################## def model_output(path, hr_type=None): \"\"\" Loads a", "rates_all_z(data_path, imf, binary=True): \"\"\" Loads the BPASS supernova event files. 
Notes ----- The", "into the 2 values modelimfs.append(elements[0]) # and append them modeltypes.append(elements[1]) # The next", "######################## # GENERAL LOAD HELPERS # ######################## def unpickle(path): \"\"\"Extract pickle files\"\"\" assert", "and hr_type == 'Tg': return _hrTg(path) elif \"hrs\" in path and hr_type ==", "we set the vector to either go back to reading a filename or", "event type return rates.swaplevel(0, 1, axis=1) def spectra_all_z(data_path, imf, binary=True): \"\"\" Load all", "This file does not exist, or its path is incorrect.\" assert hr_type in", "a specific imf and binary or single star population. Usage: spectra[1][2][0] (gives Nion", "fail\" warnings.warn(deprecation_msg, HokiDeprecationWarning) assert os.path.isdir(path), 'HOKI ERROR: The path provided does not correspond", "return hr.HRDiagram(a[15300:20400,:].reshape(51,100,100), a[20400:25500,:].reshape(51,100,100), a[25500:30600,:].reshape(51,100,100), hr_type='Tg') def _hrTTG(path): \"\"\" Load One HR diagrams (T/TG", "Load HR diagrams (TL type) \"\"\" # 'a' is just a place order", "pandas.DataFrame or hoki.hrdiagrams.HRDiagrams object \"\"\" assert isinstance(path, str), \"HOKI ERROR: The location of", "'K', 'u', 'g', 'r', 'i', 'z', 'f300w', 'f336w', 'f435w', 'f450w', 'f555w', 'f606w', 'f814w',", "- `\"imfall_300\"` - `\"imf170_100\"` - `\"imf170_300\"` Returns ------- spectra : `numpy.ndarray` (13, 51,", "'f336w', 'f435w', 'f450w', 'f555w', 'f606w', 'f814w', 'prod_rate', 'halpha', 'FUV', 'NUV']) ########################## # NEBULAR", "will tell use what we should do with this line. for l in", "'6.2', '6.3', '6.4', '6.5', '6.6', '6.7', '6.8', '6.9', '7.0', '7.1', '7.2', '7.3', '7.4',", "IMFs if imf not in BPASS_IMFS: raise HokiKeyError( f\"{imf} is not a BPASS", "raise HokiTypeError(\"The folder location is expected to be a string.\") # Check if", "The column is a `pandas.MultiIndex` with the event types (level=0, `float`) and the", "containing all the BPASS spectra per metallicity for faster loading in the future.", "key in range(96)] dummy = pd.read_csv(filename, names=cols, sep=r\"\\s+\", engine='python') return dummy ######################### #", "------- spectra : `numpy.ndarray` (13, 51, 100000) [(metallicity, log_age, wavelength)] A 3D numpy", "'TTG'. \" if \"supernova\" in path: return _sn_rates(path) elif \"numbers\" in path: return", "return pd.read_csv(path, sep=r\"\\s+\", names=['log_age', 'H_wind', 'He_wind', 'Z_wind', 'E_wind', 'E_sn', 'H_sn', 'He_sn', 'Z_sn'], engine='python')", "load supernova count files for num, metallicity in enumerate(BPASS_METALLICITIES): data = model_output( f\"{data_path}/supernova-{star}-{imf}.{metallicity}.dat\"", "row[0]: filenames.append(l) # The next line will contain the imf probability and the", "'NUV']) def _colours(path): \"\"\" Load One colour file \"\"\" return pd.read_csv(path, sep=r'\\s+', engine='python',", "`\"imf135_300\"` - `\"imfall_300\"` - `\"imf170_100\"` - `\"imf170_300\"` Returns ------- emissivities : `numpy.ndarray` (N_Z,", ": `str` The path to the folder containing the BPASS spectra. binary :", "HR diagram to load: 'TL', 'Tg' or 'TTG'. Returns ------- Output Data :", "a numpy arrays as well or is the # data frame good enough?", "implements the tools to easily load BPASS data. 
\"\"\" import pandas as pd", "GENERAL LOAD HELPERS # ######################## def unpickle(path): \"\"\"Extract pickle files\"\"\" assert os.path.isfile(path), 'File", "enumerate(BPASS_METALLICITIES): data = model_output( f\"{data_path}/supernova-{star}-{imf}.{metallicity}.dat\" ) data = data.loc[:, slice(BPASS_EVENT_TYPES[0], BPASS_EVENT_TYPES[-1])] rates.loc[:, (BPASS_NUM_METALLICITIES[num],", "'9.6', '9.7', '9.8', '9.9', '10.0', '10.1', '10.2', '10.3', '10.4', '10.5', '10.6', '10.7', '10.8',", "the # data frame good enough? __all__ = ['model_input', 'model_output', 'set_models_path', 'unpickle'] data_path", "type is invalid. \" \\ \"Available options are: 'TL', 'Tg', 'TTG'. \" if", "all the other outputs, so we need the next 2 lines. # We", "_sn_rates(path) elif \"numbers\" in path: return _stellar_numbers(path) elif \"yields\" in path: return _yields(path)", "pd.read_csv(path, sep=r\"\\s+\", names=['log_age', 'H_wind', 'He_wind', 'Z_wind', 'E_wind', 'E_sn', 'H_sn', 'He_sn', 'Z_sn'], engine='python') def", "the file in the same folder with the name: `all_ionizing-[bin/sin]-[imf].npy`. The emissivities are", "modelimfs = [] modeltypes = [] mixedimf = [] mixedage = [] initialBH", "to the hoki.constants module -- In future versions of hoki\" \\ \"calling set_models_path", "we split the line into the 2 values modelimfs.append(elements[0]) # and append them", "the BPASS emissivities (Nion [1/s], L_Halpha [ergs/s], L_FUV [ergs/s/A], L_NUV [ergs/s/A]) for a", "= pd.MultiIndex.from_product( arrays, names=[\"Metallicicty\", \"Event Type\"]) rates = pd.DataFrame(index=BPASS_TIME_BINS, columns=columns, dtype=np.float64) rates.index.name =", "= data.loc[:, slice(BPASS_EVENT_TYPES[0], BPASS_EVENT_TYPES[-1])] rates.loc[:, (BPASS_NUM_METALLICITIES[num], slice(None))] = data.to_numpy() # swap metallicity and", "will contain the imf probability and the type - we reset the vector..", "in range(96)] dummy = pd.read_csv(filename, names=cols, sep=r\"\\s+\", engine='python') return dummy ######################### # MODEL", "'6.9', '7.0', '7.1', '7.2', '7.3', '7.4', '7.5', '7.6', '7.7', '7.8', '7.9', '8.0', '8.1',", "'File not found.' return pickle.load(open(path, 'rb')) # TODO: Deprecation warning def set_models_path(path): \"\"\"", "[a,b,c,d] in the BPASS manual row = [1, 0, 0, 0] # All", "datatypes to strings and numbers. input_df = pd.DataFrame.from_dict({'filenames': filenames[:-1], 'model_imf': modelimfs, 'types': modeltypes,", "BPASS spectra per metallicity for faster loading in the future. It stores the", "and hr_type == 'TTG': return _hrTTG(path) else: print(\"HOKI ERROR -- Could not load", "'9.7', '9.8', '9.9', '10.0', '10.1', '10.2', '10.3', '10.4', '10.5', '10.6', '10.7', '10.8', '10.9',", "\"\"\" return pd.read_csv(path, sep=r\"\\s+\", names=['log_age', 'O_hL', 'Of_hL', 'B_hL', 'A_hL', 'YSG_hL', 'K_hL', 'M_hL', 'WNH_hL',", "One yields file into a dataframe \"\"\" return pd.read_csv(path, sep=r\"\\s+\", names=['log_age', 'H_wind', 'He_wind',", "IMF.\") # check if data_path is a string if not isinstance(data_path, str): raise", "names=['log_age', 'V-I', 'U', 'B', 'V', 'R', 'I', 'J', 'H', 'K', 'u', 'g', 'r',", "'age_yrs'], engine='python') def _stellar_numbers(path): \"\"\" Load One stellar type number file into a", "'11.0']) def _ionizing_flux(path): \"\"\" Load One ionizing flux file \"\"\" return pd.read_csv(path, sep=r'\\s+',", "`bool` Use the binary files or just the single stars. 
Default=True imf :", "elif row[3]: # This reads the last possible pair of inputs and puts", "binary files or just the single stars. Default=True imf : `str` BPASS Identifier", "initialBH.append(elements[0]) initialP.append(elements[0]) # We then reset the vector to be reading in filenames", "file containing the target data. hr_type : str, optional Type of HR diagram", "[(metallicity, log_age, wavelength)] A 3D numpy array containing all the BPASS spectra for", "metallicity per type. Usage: rates.loc[log_age, (type, metallicity)] Example: rates.loc[6.5, (\"Ia\", 0.02)] Notes -----", "flux file \"\"\" return pd.read_csv(path, sep=r'\\s+', engine='python', names=['log_age', 'prod_rate', 'halpha', 'FUV', 'NUV']) def", "slice(None))] = data.to_numpy() # swap metallicity and event type return rates.swaplevel(0, 1, axis=1)", "'NII6548_EW', 'NII6584_F', 'NII6584_EW', 'SiII6716_F', 'SiII6716_EW', 'SiII6731_F', 'SiII6731_EW', 'OI6300_F', 'OI6300_EW', 'OIII4959_F','OIII4959_EW','OIII5007_F','OIII5007_EW', 'Halpha_F', 'Halpha_EW', 'Hbeta_F',", "file in an array of shape (45900,100) a = np.loadtxt(path) return hr.HRDiagram(a[0:5100,:].reshape(51,100,100), a[5100:10200,:].reshape(51,100,100),", "res.output return emissivities ################# # # ################# def _do_not_use(): import webbrowser url =", "be a directory named something like bpass-v2.2-newmodels and the next level down should", "'e_II', 'e_Ib', 'e_Ic', 'e_LGRB', 'e_PISNe', 'e_low_mass', 'age_yrs'], engine='python') def _stellar_numbers(path): \"\"\" Load One", "no corresponding value for a particular model, will append a NaN filenames =", "whole file in an array of shape (45900,100) a = np.loadtxt(path) return hr.HRDiagram(a[0:5100,:].reshape(51,100,100),", "single star population. Usage: spectra[1][2][1000] (gives L_\\\\odot for Z=0.0001 and log_age=6.2 at 999", "A 3D numpy array containing all the BPASS spectra for a specific imf", "elements = l.split() mixedimf.append(elements[0]) mixedage.append(elements[1]) # Then depending on whether we need the", "a[40800:,:].reshape(51,100,100), hr_type='TTG') def _sed(path): \"\"\" Load One SED file \"\"\" return pd.read_csv(path, sep=r\"\\s+\",", "location of the file is expected to be a string.\" assert os.path.isfile(path), \"HOKI:", "beginning to read a new file name row = [1, 0, 0, 0]", "'NEWBINMODS' and 'NEWSINMODS'. Notes ----- You are going to have to reload hoki", "index is the log_age as a float. The column is a `pandas.MultiIndex` with", "the documentation of this function.\") # Create the output DataFrame arrays = [BPASS_NUM_METALLICITIES,", ": `numpy.ndarray` (N_Z, N_age, 4) [(metallicity, log_age, band)] A 3D numpy array containing", "If the type is 4 we need all the other outputs, so we", "are just read from file and not normalised. Input ----- data_path : `str`", ": `str` The path to the folder containing the BPASS files. binary :", "type. 
Usage: rates.loc[log_age, (type, metallicity)] Example: rates.loc[6.5, (\"Ia\", 0.02)] Notes ----- This dataframe", "return _optical_nebular_lines(path) def _optical_nebular_lines(path): column_opt_em_lines=['model_num', 'logU', 'log_nH', 'log_age', 'NII6548_F', 'NII6548_EW', 'NII6584_F', 'NII6584_EW', 'SiII6716_F',", "= [] initialP = [] # This goes through the file line by", "data_path, data_path, imf, binary=binary ) emissivities = res.output return emissivities ################# # #", "\"\"\" a = np.loadtxt(path) return hr.HRDiagram(a[15300:20400,:].reshape(51,100,100), a[20400:25500,:].reshape(51,100,100), a[25500:30600,:].reshape(51,100,100), hr_type='Tg') def _hrTTG(path): \"\"\" Load", "last possible pair of inputs and puts them in their rightful lists. elements", "down should contain 'NEWBINMODS' and 'NEWSINMODS'. Notes ----- You are going to have", "them in a dataframe Parameters ---------- path : str Path to the file", "\"\"\" Loads the BPASS supernova event files. Notes ----- The rates are just", "is not a BPASS IMF. Please select a correct IMF.\") # check if", "into a numpy arrays as well or is the # data frame good", "is set! Available options are: 'TL', 'Tg', 'TTG'. \") def _sn_rates(path): \"\"\" Loads", "files\"\"\" assert os.path.isfile(path), 'File not found.' return pickle.load(open(path, 'rb')) # TODO: Deprecation warning", "int, 'mixed_imf': float, 'mixed_age': float, 'initial_BH': float, 'initial_P': float}) return input_df ########################## #", "----- The first time this function is ran on a folder it will", "if int(elements[1]) < 2: # In this case all the other potential inputs", "emissivities from files. Notes ----- The first time this function is run on", "columns and set the datatypes to strings and numbers. input_df = pd.DataFrame.from_dict({'filenames': filenames[:-1],", "a string.\") # Check if compiled spectra are already present in data folder", "[] initialBH = [] initialP = [] # This goes through the file", "elif \"ioniz\" in path: return _ionizing_flux(path) elif \"colour\" in path: return _colours(path) elif", "= res.output return emissivities ################# # # ################# def _do_not_use(): import webbrowser url", "<reponame>HeloiseS/hoki \"\"\" This module implements the tools to easily load BPASS data. \"\"\"", "'u', 'g', 'r', 'i', 'z', 'f300w', 'f336w', 'f435w', 'f450w', 'f555w', 'f606w', 'f814w', 'prod_rate',", "is 2 or 3, we know the initial BH and initial P will", "the accepted IMFs if imf not in BPASS_IMFS: raise HokiKeyError( f\"{imf} is not", "correct IMF.\") # check if data_path is a string if not isinstance(data_path, str):", "in path: return _yields(path) elif \"starmass\" in path: return _stellar_masses(path) elif \"spectra\" in", "'I', 'J', 'H', 'K', 'u', 'g', 'r', 'i', 'z', 'f300w', 'f336w', 'f435w', 'f450w',", "in dummy to df from a filename\"\"\" inv_dict ={v: k for k, v", "0, 1, 0] initialBH.append(np.nan) initialP.append(np.nan) continue elif int(elements[1]) == 4: # If the", "dataframe containing the BPASS number of events per metallicity per type. Usage: rates.loc[log_age,", "BPASS number of events per metallicity per type. Usage: rates.loc[log_age, (type, metallicity)] Example:", "BPASS_IMFS: raise HokiKeyError( f\"{imf} is not a BPASS IMF. Please select a correct", "to probe those inputs. 
if not row[3]: row = [1, 0, 0, 0]", "0, 0] if row[3]: row[2] = 0 continue elif row[3]: # This reads", "BPASS_EVENT_TYPES[-1])] rates.loc[:, (BPASS_NUM_METALLICITIES[num], slice(None))] = data.to_numpy() # swap metallicity and event type return", "11.0 | \"\"\" # Check population type star = \"bin\" if binary else", "in a dataframe # with some named columns and set the datatypes to", "_hrTg(path) elif \"hrs\" in path and hr_type == 'TTG': return _hrTTG(path) else: print(\"HOKI", "the Stellar Population output. \" \"\\nDEBUGGING ASSISTANT:\\n1) Is the filename correct?\" \"\\n2) Trying", "time this function is run on a folder it will generate an npy", "'TTG'. \") def _sn_rates(path): \"\"\" Loads One Supernova rate file into a dataframe", "np.loadtxt(path) return hr.HRDiagram(a[15300:20400,:].reshape(51,100,100), a[20400:25500,:].reshape(51,100,100), a[25500:30600,:].reshape(51,100,100), hr_type='Tg') def _hrTTG(path): \"\"\" Load One HR diagrams", "row[2] = 0 continue elif row[3]: # This reads the last possible pair", "`str` The path to the folder containing the BPASS spectra. binary : `bool`", "sep=r'\\s+', engine='python', names=column_UV_em_lines) ##################################### # BPASS Load over all metallicity # ##################################### def", "`all_spectra-[bin/sin]-[imf].pkl` The spectra are just read from file and not normalised. Input -----", "a string if not isinstance(data_path, str): raise HokiTypeError(\"The folder location is expected to", "data_path : `str` The path to the folder containing the BPASS files. binary", "exist, or its path is incorrect.\" assert hr_type in [None,'TL', 'Tg', 'TTG'], \"HOKI", "# We then reset the vector to be reading in filenames because no", "we need the next line for more inputs # we set the vector", "0, 0, 0] # Once we've goe through the whole file and filled", "def spectra_all_z(data_path, imf, binary=True): \"\"\" Load all BPASS spectra from files. Notes -----", "we go back to the # beginning to read a new file name", "\"HOKI ERROR: The HR diagram type is invalid. \" \\ \"Available options are:", "to be a string.\" assert os.path.isfile(path), \"HOKI: ERROR This file does not exist,", "6.0 | | ... | Event Rate values | 11.0 | \"\"\" #", "sep=r\"\\s+\", names=['log_age', 'stellar_mass', 'remnant_mass'], engine='python') def _hrTL(path): \"\"\" Load HR diagrams (TL type)", "# check IMF key if imf not in BPASS_IMFS: raise HokiKeyError( f\"{imf} is", "and event type return rates.swaplevel(0, 1, axis=1) def spectra_all_z(data_path, imf, binary=True): \"\"\" Load", "else \"sin\" # check IMF key if imf not in BPASS_IMFS: raise HokiKeyError(", "To know what each value means, consult the BPASS manual if int(elements[1]) <", "vector accordingly. row = [0, 0, 1, 1] continue elif row[2]: # Splitting", "# ######################### def model_input(path): \"\"\" Loads inputs from one file and put them", "= data.to_numpy() # swap metallicity and event type return rates.swaplevel(0, 1, axis=1) def", "the input data. Returns ------- \"\"\" assert isinstance(path, str), \"The location of the", "'K_lL', 'M_lL', 'WNH_lL', 'WN_lL', 'WC_lL'], engine='python') def _yields(path): \"\"\" Load One yields file", "| 0.04 | | log_age |---------|----------|------|-------|----------| | 6.0 | | ... 
| Event", "what each value means, consult the BPASS manual if int(elements[1]) < 2: #", "`\"imfall_300\"` - `\"imf170_100\"` - `\"imf170_300\"` Returns ------- spectra : `numpy.ndarray` (13, 51, 100000)", "does not exist, or its path is incorrect.\" assert hr_type in [None,'TL', 'Tg',", "'9.1', '9.2', '9.3', '9.4', '9.5', '9.6', '9.7', '9.8', '9.9', '10.0', '10.1', '10.2', '10.3',", "HokiTypeError(\"The folder location is expected to be a string.\") # check if compiled", "settings = yaml.safe_load(stream) settings['models_path'] = path with io.open(path_to_settings, 'w', encoding='utf8') as outfile: yaml.dump(settings,", "_hrTL(path) elif \"hrs\" in path and hr_type == 'Tg': return _hrTg(path) elif \"hrs\"", "2 lines. # We set the vector accordingly. row = [0, 0, 1,", "some named columns and set the datatypes to strings and numbers. input_df =", "tell use what we should do with this line. for l in lines[1:]:", "(gives Nion for Z=0.0001 and log_age=6.2) \"\"\" # Check population type star =", "'data') ######################## # GENERAL LOAD HELPERS # ######################## def unpickle(path): \"\"\"Extract pickle files\"\"\"", "'LGRB', 'PISNe', 'low_mass', 'e_Ia', 'e_IIP', 'e_II', 'e_Ib', 'e_Ic', 'e_LGRB', 'e_PISNe', 'e_low_mass', 'age_yrs'], engine='python')", "float. The column is a `pandas.MultiIndex` with the event types (level=0, `float`) and", "return hr.HRDiagram(a[0:5100,:].reshape(51,100,100), a[5100:10200,:].reshape(51,100,100), a[10200:15300,:].reshape(51,100,100), hr_type='TL') def _hrTg(path): \"\"\" Load One HR diagrams (Tg", "is expected to be a string.\" assert os.path.isfile(path), f\"File {path} does not exist,", "numpy arrays as well or is the # data frame good enough? __all__", "'OI1357_F', 'OI1357_EW', 'OIII1661_F', 'OIII1661_EW', 'OIII1666_F', 'OIII1666_EW', 'SiII1263_F', 'SiII1263_EW', 'SiIII1308_F', 'SiIII1308_EW', 'SiII1531_F', 'SiII1531_EW'] return", "4: # If the type is 4 we need all the other outputs,", "_optical_nebular_lines(path): column_opt_em_lines=['model_num', 'logU', 'log_nH', 'log_age', 'NII6548_F', 'NII6548_EW', 'NII6584_F', 'NII6584_EW', 'SiII6716_F', 'SiII6716_EW', 'SiII6731_F', 'SiII6731_EW',", "'initial_P': initialP}).astype({'filenames': str, 'model_imf': float, 'types': int, 'mixed_imf': float, 'mixed_age': float, 'initial_BH': float,", "= [BPASS_NUM_METALLICITIES, BPASS_EVENT_TYPES] columns = pd.MultiIndex.from_product( arrays, names=[\"Metallicicty\", \"Event Type\"]) rates = pd.DataFrame(index=BPASS_TIME_BINS,", "type return rates.swaplevel(0, 1, axis=1) def spectra_all_z(data_path, imf, binary=True): \"\"\" Load all BPASS", "load: 'TL', 'Tg' or 'TTG'. Returns ------- Output Data : pandas.DataFrame or hoki.hrdiagrams.HRDiagrams", "in path and hr_type == 'TL': return _hrTL(path) elif \"hrs\" in path and", "and append them modeltypes.append(elements[1]) # The next step is decided according to the", "= [1, 0, 0, 0] # All potential input parameters and filename #", "append a NaN filenames = [] modelimfs = [] modeltypes = [] mixedimf", "'8.0', '8.1', '8.2', '8.3', '8.4', '8.5', '8.6', '8.7', '8.8', '8.9', '9.0', '9.1', '9.2',", "l in lines[1:]: # This line contains the filename. if row[0]: filenames.append(l) #", "initial BH and initial P will be NaN # but we also need", "mixedimf.append(elements[0]) mixedage.append(elements[1]) # Then depending on whether we need the next line for", "Z=0.0001 and log_age=6.2 at 999 Angstrom) \"\"\" # Check population type star =", "rest to read in the next line. 
continue # This line contains the", "engine='python', names=column_opt_em_lines) def _UV_nebular_lines(path): column_UV_em_lines = ['model_num', 'logU', 'log_nH', 'log_age', 'HeII1640_F', 'HeII1640_EW', 'CIII1907_F',", "incorrect.\" assert hr_type in [None,'TL', 'Tg', 'TTG'], \"HOKI ERROR: The HR diagram type", "`\"imf135_300\"` - `\"imfall_300\"` - `\"imf170_100\"` - `\"imf170_300\"` Returns ------- spectra : `numpy.ndarray` (13,", "str Path to the file containing the target data. hr_type : str, optional", "P will be NaN # but we also need to read the next", "be NaN # but we also need to read the next line so", "continue elif row[3]: # This reads the last possible pair of inputs and", "from hoki.constants import * import os import yaml import io import pickle import", "= pd.DataFrame(index=BPASS_TIME_BINS, columns=columns, dtype=np.float64) rates.index.name = \"log_age\" # load supernova count files for", "the path to the stellar models in hoki's settings Parameters ---------- path :", "chose to load the data into a numpy arrays as well or is", "This module implements the tools to easily load BPASS data. \"\"\" import pandas", "respective lists elements = l.split() mixedimf.append(elements[0]) mixedage.append(elements[1]) # Then depending on whether we", "The index is the log_age as a float. The column is a `pandas.MultiIndex`", "dummy ######################### # MODEL INPUT FUNCTIONS # ######################### def model_input(path): \"\"\" Loads inputs", "f\"{imf} is not a BPASS IMF. Please select a correct IMF.\\n\"\\ \"These can", "population. Usage: spectra[1][2][0] (gives Nion for Z=0.0001 and log_age=6.2) \"\"\" # Check population", "engine='python') def _stellar_masses(path): \"\"\" Load One stellar masses file into a dataframe \"\"\"", "A 3D numpy array containing all the BPASS emissivities (Nion [1/s], L_Halpha [ergs/s],", "population. Usage: spectra[1][2][1000] (gives L_\\\\odot for Z=0.0001 and log_age=6.2 at 999 Angstrom) \"\"\"", "Load all BPASS emissivities from files. Notes ----- The first time this function", "\"\"\" return pd.read_csv(path, sep=r'\\s+', engine='python', names=['log_age', 'prod_rate', 'halpha', 'FUV', 'NUV']) def _colours(path): \"\"\"", "is expected to be a string.\") # check if compiled file exists if", "# Check population type star = \"bin\" if binary else \"sin\" # Check", "Load One yields file into a dataframe \"\"\" return pd.read_csv(path, sep=r\"\\s+\", names=['log_age', 'H_wind',", "precompiled file.\") emissivities = np.load(f\"{data_path}/all_ionizing-{star}-{imf}.npy\") print(\"Done Loading.\") # Compile the spectra for faster", "Usage: spectra[1][2][1000] (gives L_\\\\odot for Z=0.0001 and log_age=6.2 at 999 Angstrom) \"\"\" #", "l.split() # we split the line into the 2 values modelimfs.append(elements[0]) # and", "import os import yaml import io import pickle import pkg_resources import hoki.data_compilers import", "those inputs. if not row[3]: row = [1, 0, 0, 0] if row[3]:", "in a dataframe Parameters ---------- path : str Path to the file containing", "print(\"Compiled file not found. Data will be compiled\") spec = hoki.data_compilers.SpectraCompiler( data_path, data_path,", "of the file is expected to be a string.\" assert os.path.isfile(path), f\"File {path}", "# TODO: Should I allow people to chose to load the data into", "column is a `pandas.MultiIndex` with the event types (level=0, `float`) and the metallicity", "Path to the file containing the input data. 
Returns ------- \"\"\" assert isinstance(path,", "Please select a correct IMF.\") # check if data_path is a string if", "['model_input', 'model_output', 'set_models_path', 'unpickle'] data_path = pkg_resources.resource_filename('hoki', 'data') ######################## # GENERAL LOAD HELPERS", "names=['WL', '6.0', '6.1', '6.2', '6.3', '6.4', '6.5', '6.6', '6.7', '6.8', '6.9', '7.0', '7.1',", "'WN_lL', 'WC_lL'], engine='python') def _yields(path): \"\"\" Load One yields file into a dataframe", "(13, 51, 100000) [(metallicity, log_age, wavelength)] A 3D numpy array containing all the", "settings Parameters ---------- path : str, Absolute path to the top level of", "probe those inputs. if not row[3]: row = [1, 0, 0, 0] if", "next line so we set the vector accordingly. row = [0, 0, 1,", "'8.7', '8.8', '8.9', '9.0', '9.1', '9.2', '9.3', '9.4', '9.5', '9.6', '9.7', '9.8', '9.9',", "other potential inputs are NaNs and we go back to the # beginning", "a BPASS output file Parameters ---------- path : str Path to the file", "does not exist, or its path is incorrect.\" lines = open(path).read().split(\"\\n\") # rows", "NaNs and we go back to the # beginning to read a new", "= \"bin\" if binary else \"sin\" # Check if the given IMF is", "= spec.output return spectra def emissivities_all_z(data_path, imf, binary=True): \"\"\" Load all BPASS emissivities", "are coming row = [1, 0, 0, 0] # Once we've goe through", "into a dataframe \"\"\" return pd.read_csv(path, sep=r\"\\s+\", names=['log_age', 'O_hL', 'Of_hL', 'B_hL', 'A_hL', 'YSG_hL',", "found in the documentation of this function.\") # Create the output DataFrame arrays", "def dummy_to_dataframe(filename, bpass_version=DEFAULT_BPASS_VERSION): \"\"\"Reads in dummy to df from a filename\"\"\" inv_dict ={v:", "potential input parameters and filename # If there is no corresponding value for", "2 or 3, we know the initial BH and initial P will be", "correct?\" \"\\n2) Trying to load an HR diagram? \" \"Make sure hr_type is", "3D numpy array containing all the BPASS spectra for a specific imf and", "`numpy.ndarray` (N_Z, N_age, 4) [(metallicity, log_age, band)] A 3D numpy array containing all", "= \"bin\" if binary else \"sin\" # check IMF key if imf not", "# If the type is 4 we need all the other outputs, so", "all the BPASS spectra for a specific imf and binary or single star", "(N_Z, N_age, 4) [(metallicity, log_age, band)] A 3D numpy array containing all the", "filename # If there is no corresponding value for a particular model, will", "A pandas MultiIndex dataframe containing the BPASS number of events per metallicity per", "an HR diagram? \" \"Make sure hr_type is set! Available options are: 'TL',", "'mixed_imf': mixedimf, 'mixed_age': mixedage, 'initial_BH': initialBH, 'initial_P': initialP}).astype({'filenames': str, 'model_imf': float, 'types': int,", "we skip the rest to read in the next line. continue # This", "# We set the vector accordingly. 
row = [0, 0, 1, 1] continue", "inputs are NaNs and we go back to the # beginning to read", "the folder containing the BPASS data binary : `bool` Use the binary files", "= hoki.data_compilers.EmissivityCompiler( data_path, data_path, imf, binary=binary ) emissivities = res.output return emissivities #################", "######################## # LOAD DUMMY VARIABLE # ######################## def dummy_to_dataframe(filename, bpass_version=DEFAULT_BPASS_VERSION): \"\"\"Reads in dummy", "their respective lists elements = l.split() mixedimf.append(elements[0]) mixedage.append(elements[1]) # Then depending on whether", "= pd.DataFrame.from_dict({'filenames': filenames[:-1], 'model_imf': modelimfs, 'types': modeltypes, 'mixed_imf': mixedimf, 'mixed_age': mixedage, 'initial_BH': initialBH,", "nebular_lines(path): \"\"\" Load the nebular line output information Parameters ---------- path Returns -------", "imf probability and the type elif row[1]: elements = l.split() # we split", "shape (45900,100) a = np.loadtxt(path) return hr.HRDiagram(a[0:5100,:].reshape(51,100,100), a[5100:10200,:].reshape(51,100,100), a[10200:15300,:].reshape(51,100,100), hr_type='TL') def _hrTg(path): \"\"\"", "lines. # We set the vector accordingly. row = [0, 0, 1, 1]", "file:' '\\n'+path_to_settings) ######################## # LOAD DUMMY VARIABLE # ######################## def dummy_to_dataframe(filename, bpass_version=DEFAULT_BPASS_VERSION): \"\"\"Reads", "to the top level of the stellar models this could be a directory", "order which contains the whole file in an array of shape (45900,100) a", "to have to reload hoki for your new path to take effect. \"\"\"", "L_FUV [ergs/s/A], L_NUV [ergs/s/A]) for a specific imf and binary or single star", "`\"imf170_100\"` - `\"imf170_300\"` Returns ------- spectra : `numpy.ndarray` (13, 51, 100000) [(metallicity, log_age,", "data = model_output( f\"{data_path}/supernova-{star}-{imf}.{metallicity}.dat\" ) data = data.loc[:, slice(BPASS_EVENT_TYPES[0], BPASS_EVENT_TYPES[-1])] rates.loc[:, (BPASS_NUM_METALLICITIES[num], slice(None))]", "= hoki.data_compilers.SpectraCompiler( data_path, data_path, imf, binary=binary ) spectra = spec.output return spectra def", "file \"\"\" return pd.read_csv(path, sep=r'\\s+', engine='python', names=['log_age', 'prod_rate', 'halpha', 'FUV', 'NUV']) def _colours(path):", "folder with the name: `all_ionizing-[bin/sin]-[imf].npy`. The emissivities are just read from file and", "go back to the # beginning to read a new file name row", "file into a dataframe \"\"\" return pd.read_csv(path, sep=r\"\\s+\", names=['log_age', 'O_hL', 'Of_hL', 'B_hL', 'A_hL',", "next time otherwise else: print(\"Compiled file not found. Data will be compiled.\") res", "'SiII6716_F', 'SiII6716_EW', 'SiII6731_F', 'SiII6731_EW', 'OI6300_F', 'OI6300_EW', 'OIII4959_F','OIII4959_EW','OIII5007_F','OIII5007_EW', 'Halpha_F', 'Halpha_EW', 'Hbeta_F', 'Hbeta_EW', 'HeI4686_F', 'HeI4686_EW']", "string.\" assert os.path.isfile(path), f\"File {path} does not exist, or its path is incorrect.\"", "not correspond to a valid directory' path_to_settings = data_path+'/settings.yaml' with open(path_to_settings, 'r') as", "is a `pandas.MultiIndex` with the event types (level=0, `float`) and the metallicity (level=1,", "\\ \"Available options are: 'TL', 'Tg', 'TTG'. \" if \"supernova\" in path: return", "'V-I', 'U', 'B', 'V', 'R', 'I', 'J', 'H', 'K', 'u', 'g', 'r', 'i',", "the filename. if row[0]: filenames.append(l) # The next line will contain the imf", "load BPASS data. 
\"\"\" import pandas as pd import hoki.hrdiagrams as hr from", "'6.6', '6.7', '6.8', '6.9', '7.0', '7.1', '7.2', '7.3', '7.4', '7.5', '7.6', '7.7', '7.8',", "and binary or single star population. Usage: spectra[1][2][1000] (gives L_\\\\odot for Z=0.0001 and", "spec.output return spectra def emissivities_all_z(data_path, imf, binary=True): \"\"\" Load all BPASS emissivities from", "Load One HR diagrams (Tg type) \"\"\" a = np.loadtxt(path) return hr.HRDiagram(a[15300:20400,:].reshape(51,100,100), a[20400:25500,:].reshape(51,100,100),", "This reads the last possible pair of inputs and puts them in their", "stellar type number file into a dataframe \"\"\" return pd.read_csv(path, sep=r\"\\s+\", names=['log_age', 'O_hL',", "row = [1, 0, 0, 0] mixedimf.append(0.0) mixedage.append(0.0) initialBH.append(np.nan) initialP.append(np.nan) continue elif int(elements[1])", "path_to_settings = data_path+'/settings.yaml' with open(path_to_settings, 'r') as stream: settings = yaml.safe_load(stream) settings['models_path'] =", "if data_path is a string if not isinstance(data_path, str): raise HokiTypeError(\"The folder location", "[inv_dict[key] if key in inv_dict.keys() else 'Nan'+str(key) for key in range(96)] dummy =", "emissivities = res.output return emissivities ################# # # ################# def _do_not_use(): import webbrowser", "... | Event Rate values | 11.0 | \"\"\" # Check population type", "the given IMF is in the accepted IMFs if imf not in BPASS_IMFS:", "modelimfs, 'types': modeltypes, 'mixed_imf': mixedimf, 'mixed_age': mixedage, 'initial_BH': initialBH, 'initial_P': initialP}).astype({'filenames': str, 'model_imf':", "One HR diagrams (T/TG type) \"\"\" a = np.loadtxt(path) return hr.HRDiagram(a[30600:35700,:].reshape(51,100,100), a[35700:40800,:].reshape(51,100,100), a[40800:,:].reshape(51,100,100),", "options are: 'TL', 'Tg', 'TTG'. \" if \"supernova\" in path: return _sn_rates(path) elif", "containing the BPASS data binary : `bool` Use the binary files or just", "a BPASS IMF. Please select a correct IMF.\") # check if data_path is", "a[5100:10200,:].reshape(51,100,100), a[10200:15300,:].reshape(51,100,100), hr_type='TL') def _hrTg(path): \"\"\" Load One HR diagrams (Tg type) \"\"\"", "imf, binary=True): \"\"\" Load all BPASS emissivities from files. Notes ----- The first", "The path to the folder containing the BPASS files. binary : `bool` Use", "the following structure. The index is the log_age as a float. The column", "for k, v in dummy_dicts[bpass_version].items()} cols = [inv_dict[key] if key in inv_dict.keys() else", "elif \"colour\" in path: return _colours(path) elif \"hrs\" in path and hr_type ==", "def unpickle(path): \"\"\"Extract pickle files\"\"\" assert os.path.isfile(path), 'File not found.' return pickle.load(open(path, 'rb'))", "'e_Ib', 'e_Ic', 'e_LGRB', 'e_PISNe', 'e_low_mass', 'age_yrs'], engine='python') def _stellar_numbers(path): \"\"\" Load One stellar", "'WC_hL', 'O_lL', 'Of_lL', 'B_lL', 'A_lL', 'YSG_lL', 'K_lL', 'M_lL', 'WNH_lL', 'WN_lL', 'WC_lL'], engine='python') def", "emissivities = np.load(f\"{data_path}/all_ionizing-{star}-{imf}.npy\") print(\"Done Loading.\") # Compile the spectra for faster reading next", "binary=binary ) emissivities = res.output return emissivities ################# # # ################# def _do_not_use():", "folder with the name: `all_spectra-[bin/sin]-[imf].pkl` The spectra are just read from file and", "for faster loading in the future. 
It stores the file in the same", "in the same folder with the name: `all_spectra-[bin/sin]-[imf].pkl` The spectra are just read", "to take effect. \"\"\" deprecation_msg = \"set_models_path has been moved to the hoki.constants", "continue elif int(elements[1]) != 4: # If type is 2 or 3, we", "'logU', 'log_nH', 'log_age', 'NII6548_F', 'NII6548_EW', 'NII6584_F', 'NII6584_EW', 'SiII6716_F', 'SiII6716_EW', 'SiII6731_F', 'SiII6731_EW', 'OI6300_F', 'OI6300_EW',", "or 'TTG'. Returns ------- Output Data : pandas.DataFrame or hoki.hrdiagrams.HRDiagrams object \"\"\" assert", "row[1]: elements = l.split() # we split the line into the 2 values", "data.loc[:, slice(BPASS_EVENT_TYPES[0], BPASS_EVENT_TYPES[-1])] rates.loc[:, (BPASS_NUM_METALLICITIES[num], slice(None))] = data.to_numpy() # swap metallicity and event", "in path and hr_type == 'Tg': return _hrTg(path) elif \"hrs\" in path and", "star = \"bin\" if binary else \"sin\" # Check if the given IMF", "DataFrame arrays = [BPASS_NUM_METALLICITIES, BPASS_EVENT_TYPES] columns = pd.MultiIndex.from_product( arrays, names=[\"Metallicicty\", \"Event Type\"]) rates", "'CIV1551_EW', 'OI1357_F', 'OI1357_EW', 'OIII1661_F', 'OIII1661_EW', 'OIII1666_F', 'OIII1666_EW', 'SiII1263_F', 'SiII1263_EW', 'SiIII1308_F', 'SiIII1308_EW', 'SiII1531_F', 'SiII1531_EW']", "dummy_dicts[bpass_version].items()} cols = [inv_dict[key] if key in inv_dict.keys() else 'Nan'+str(key) for key in", "is expected to be a string.\") # Check if compiled spectra are already", "elements = l.split() # we split the line into the 2 values modelimfs.append(elements[0])", "location of the file is expected to be a string.\" assert os.path.isfile(path), \"HOKI", "rates.index.name = \"log_age\" # load supernova count files for num, metallicity in enumerate(BPASS_METALLICITIES):", "float, 'types': int, 'mixed_imf': float, 'mixed_age': float, 'initial_BH': float, 'initial_P': float}) return input_df", "'9.5', '9.6', '9.7', '9.8', '9.9', '10.0', '10.1', '10.2', '10.3', '10.4', '10.5', '10.6', '10.7',", "the BPASS files. binary : `bool` Use the binary files or just the", "l.split() initialBH.append(elements[0]) initialP.append(elements[0]) # We then reset the vector to be reading in", "string.\" assert os.path.isfile(path), \"HOKI: ERROR This file does not exist, or its path", "should do with this line. for l in lines[1:]: # This line contains", "we can put them in a dataframe # with some named columns and", "----- data_path : `str` The path to the folder containing the BPASS files.", "yaml.safe_load(stream) settings['models_path'] = path with io.open(path_to_settings, 'w', encoding='utf8') as outfile: yaml.dump(settings, outfile, default_flow_style=False,", "lists. 
elements = l.split() initialBH.append(elements[0]) initialP.append(elements[0]) # We then reset the vector to", "# load supernova count files for num, metallicity in enumerate(BPASS_METALLICITIES): data = model_output(", "continue elif int(elements[1]) == 4: # If the type is 4 we need", "- `\"imf135_100\"` - `\"imf135_300\"` - `\"imfall_300\"` - `\"imf170_100\"` - `\"imf170_300\"` Returns ------- emissivities", "the BPASS manual if int(elements[1]) < 2: # In this case all the", "place order which contains the whole file in an array of shape (45900,100)", "# The vector will tell use what we should do with this line.", "elif \"hrs\" in path and hr_type == 'Tg': return _hrTg(path) elif \"hrs\" in", "'8.9', '9.0', '9.1', '9.2', '9.3', '9.4', '9.5', '9.6', '9.7', '9.8', '9.9', '10.0', '10.1',", "a `pandas.MultiIndex` with the event types (level=0, `float`) and the metallicity (level=1, `float`)", "hr_type=None): \"\"\" Loads a BPASS output file Parameters ---------- path : str Path", "line output information Parameters ---------- path Returns ------- \"\"\" assert isinstance(path, str), \"HOKI", "'log_age', 'HeII1640_F', 'HeII1640_EW', 'CIII1907_F', 'CIII1907_EW', 'CIII1910_F', 'CIII1910_EW', 'CIV1548_F', 'CIV1548_EW', 'CIV1551_F', 'CIV1551_EW', 'OI1357_F', 'OI1357_EW',", "data_path : `str` The path to the folder containing the BPASS spectra. binary", "# Splitting the two values and putting them in their respective lists elements", "name: `all_spectra-[bin/sin]-[imf].pkl` The spectra are just read from file and not normalised. Input", "'WNH_lL', 'WN_lL', 'WC_lL'], engine='python') def _yields(path): \"\"\" Load One yields file into a", "arrays, names=[\"Metallicicty\", \"Event Type\"]) rates = pd.DataFrame(index=BPASS_TIME_BINS, columns=columns, dtype=np.float64) rates.index.name = \"log_age\" #", "function is ran on a folder it will generate a pickle file containing", "'R', 'I', 'J', 'H', 'K', 'u', 'g', 'r', 'i', 'z', 'f300w', 'f336w', 'f435w',", "NaN filenames = [] modelimfs = [] modeltypes = [] mixedimf = []", "Angstrom) \"\"\" # Check population type star = \"bin\" if binary else \"sin\"", "path and hr_type == 'TTG': return _hrTTG(path) else: print(\"HOKI ERROR -- Could not", "| | ... | Event Rate values | 11.0 | \"\"\" # Check", "# This reads the last possible pair of inputs and puts them in", "Identifier of the IMF to be used. The accepted IMF identifiers are: -", "data frame good enough? __all__ = ['model_input', 'model_output', 'set_models_path', 'unpickle'] data_path = pkg_resources.resource_filename('hoki',", "'f606w', 'f814w', 'prod_rate', 'halpha', 'FUV', 'NUV']) ########################## # NEBULAR EMISSION LINES # ##########################", "a dataframe \"\"\" return pd.read_csv(path, sep=r\"\\s+\", names=['log_age', 'O_hL', 'Of_hL', 'B_hL', 'A_hL', 'YSG_hL', 'K_hL',", "\"Make sure hr_type is set! Available options are: 'TL', 'Tg', 'TTG'. \") def", "\"bin\" if binary else \"sin\" # check IMF key if imf not in", "hoki.data_compilers import warnings from hoki.utils.exceptions import HokiDeprecationWarning, HokiKeyError # TODO: Should I allow", "----- This dataframe has the following structure. The index is the log_age as", "data. \"\"\" import pandas as pd import hoki.hrdiagrams as hr from hoki.constants import", "not found.' return pickle.load(open(path, 'rb')) # TODO: Deprecation warning def set_models_path(path): \"\"\" Changes", "value for a particular model, will append a NaN filenames = [] modelimfs", "in the next line. 
continue # This line contains the imf probability and", "per metallicity for faster loading in the future. It stores the file in", "just read from file and not normalised. Input ----- data_path : `str` The", "= [0, 0, 1, 0] initialBH.append(np.nan) initialP.append(np.nan) continue elif int(elements[1]) == 4: #", "print('Looks like everything went well! You can check the path was correctly updated", "the tools to easily load BPASS data. \"\"\" import pandas as pd import", "'SiII6731_F', 'SiII6731_EW', 'OI6300_F', 'OI6300_EW', 'OIII4959_F','OIII4959_EW','OIII5007_F','OIII5007_EW', 'Halpha_F', 'Halpha_EW', 'Hbeta_F', 'Hbeta_EW', 'HeI4686_F', 'HeI4686_EW'] return pd.read_csv(path,", "BPASS IMF. Please select a correct IMF.\\n\"\\ \"These can be found in the", "need to read the next line so we set the vector accordingly. row", "pkg_resources import hoki.data_compilers import warnings from hoki.utils.exceptions import HokiDeprecationWarning, HokiKeyError # TODO: Should", "type - we reset the vector.. row = [0, 1, 0, 0] #", "if key in inv_dict.keys() else 'Nan'+str(key) for key in range(96)] dummy = pd.read_csv(filename,", "Data : pandas.DataFrame or hoki.hrdiagrams.HRDiagrams object \"\"\" assert isinstance(path, str), \"HOKI ERROR: The", "else: print(\"Compiled file not found. Data will be compiled\") spec = hoki.data_compilers.SpectraCompiler( data_path,", "the folder containing the BPASS files. binary : `bool` Use the binary files", "OUTPUT FUNCTIONS # ########################## def model_output(path, hr_type=None): \"\"\" Loads a BPASS output file", "next step is decided according to the value of type # To know", "'7.5', '7.6', '7.7', '7.8', '7.9', '8.0', '8.1', '8.2', '8.3', '8.4', '8.5', '8.6', '8.7',", "in path: return _colours(path) elif \"hrs\" in path and hr_type == 'TL': return", "`str` The filepath to the folder containing the BPASS data binary : `bool`", "< 2: # In this case all the other potential inputs are NaNs", "event types (level=0, `float`) and the metallicity (level=1, `float`) |Event Type | Ia", "for num, metallicity in enumerate(BPASS_METALLICITIES): data = model_output( f\"{data_path}/supernova-{star}-{imf}.{metallicity}.dat\" ) data = data.loc[:,", "print(\"Compiled file not found. Data will be compiled.\") res = hoki.data_compilers.EmissivityCompiler( data_path, data_path,", "line will contain the imf probability and the type - we reset the", "'NII6584_F', 'NII6584_EW', 'SiII6716_F', 'SiII6716_EW', 'SiII6731_F', 'SiII6731_EW', 'OI6300_F', 'OI6300_EW', 'OIII4959_F','OIII4959_EW','OIII5007_F','OIII5007_EW', 'Halpha_F', 'Halpha_EW', 'Hbeta_F', 'Hbeta_EW',", "versions of hoki\" \\ \"calling set_models_path from hoki.load will fail\" warnings.warn(deprecation_msg, HokiDeprecationWarning) assert", "the # beginning to read a new file name row = [1, 0,", "of hoki\" \\ \"calling set_models_path from hoki.load will fail\" warnings.warn(deprecation_msg, HokiDeprecationWarning) assert os.path.isdir(path),", "the file containing the target data. 
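# A minimal sketch of model_input in use, assuming a BPASS model input list is available
# locally (the filename below is purely illustrative):
#
#   inputs = model_input("input_bpass_z020_bin_imf135_300")
#   type4 = inputs[inputs["types"] == 4]   # models for which all four input pairs were read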
##########################
# MODEL OUTPUT FUNCTIONS #
##########################

def model_output(path, hr_type=None):
    """
    Loads a BPASS output file.

    Parameters
    ----------
    path : str
        Path to the file containing the target data.

    hr_type : str, optional
        Type of HR diagram to load: 'TL', 'Tg' or 'TTG'.

    Returns
    -------
    Output Data : pandas.DataFrame or hoki.hrdiagrams.HRDiagrams object
    """
    assert isinstance(path, str), "HOKI ERROR: The location of the file is expected to be a string."
    assert os.path.isfile(path), "HOKI ERROR: This file does not exist, or its path is incorrect."
    assert hr_type in [None, 'TL', 'Tg', 'TTG'], "HOKI ERROR: The HR diagram type is invalid. " \
                                                 "Available options are: 'TL', 'Tg', 'TTG'."

    if "supernova" in path:
        return _sn_rates(path)

    elif "numbers" in path:
        return _stellar_numbers(path)

    elif "yields" in path:
        return _yields(path)

    elif "starmass" in path:
        return _stellar_masses(path)

    elif "spectra" in path:
        return _sed(path)

    elif "ioniz" in path:
        return _ionizing_flux(path)

    elif "colour" in path:
        return _colours(path)

    elif "hrs" in path and hr_type == 'TL':
        return _hrTL(path)

    elif "hrs" in path and hr_type == 'Tg':
        return _hrTg(path)

    elif "hrs" in path and hr_type == 'TTG':
        return _hrTTG(path)

    else:
        print("HOKI ERROR -- Could not load the Stellar Population output. "
              "\nDEBUGGING ASSISTANT:"
              "\n1) Is the filename correct?"
              "\n2) Trying to load an HR diagram? Make sure hr_type is set! "
              "Available options are: 'TL', 'Tg', 'TTG'.")
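# Example calls (a sketch; the file names are hypothetical BPASS output files).
# The loader is chosen from substrings of the path, and HR diagram files
# additionally need hr_type to be set:
#
#   sn_rates = model_output("supernova-bin-imf135_300.z020.dat")
#   hrd = model_output("hrs-bin-imf135_300.z020.dat", hr_type="TL")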
def _sn_rates(path):
    """
    Loads one supernova rate file into a dataframe
    """
    return pd.read_csv(path, sep=r"\s+", engine='python',
                       names=['log_age', 'Ia', 'IIP', 'II', 'Ib', 'Ic', 'LGRB', 'PISNe', 'low_mass',
                              'e_Ia', 'e_IIP', 'e_II', 'e_Ib', 'e_Ic', 'e_LGRB', 'e_PISNe', 'e_low_mass',
                              'age_yrs'])


def _stellar_numbers(path):
    """
    Load one stellar type number file into a dataframe
    """
    return pd.read_csv(path, sep=r"\s+", engine='python',
                       names=['log_age', 'O_hL', 'Of_hL', 'B_hL', 'A_hL', 'YSG_hL', 'K_hL', 'M_hL',
                              'WNH_hL', 'WN_hL', 'WC_hL', 'O_lL', 'Of_lL', 'B_lL', 'A_lL', 'YSG_lL',
                              'K_lL', 'M_lL', 'WNH_lL', 'WN_lL', 'WC_lL'])


def _yields(path):
    """
    Load one yields file into a dataframe
    """
    return pd.read_csv(path, sep=r"\s+", engine='python',
                       names=['log_age', 'H_wind', 'He_wind', 'Z_wind', 'E_wind', 'E_sn',
                              'H_sn', 'He_sn', 'Z_sn'])


def _stellar_masses(path):
    """
    Load one stellar masses file into a dataframe
    """
    return pd.read_csv(path, sep=r"\s+", engine='python',
                       names=['log_age', 'stellar_mass', 'remnant_mass'])


def _hrTL(path):
    """
    Load HR diagrams (TL type)
    """
    # 'a' is just a placeholder which contains the whole file in an array of shape (45900, 100)
    a = np.loadtxt(path)
    return hr.HRDiagram(a[0:5100, :].reshape(51, 100, 100),
                        a[5100:10200, :].reshape(51, 100, 100),
                        a[10200:15300, :].reshape(51, 100, 100), hr_type='TL')


def _hrTg(path):
    """
    Load one HR diagram (Tg type)
    """
    a = np.loadtxt(path)
    return hr.HRDiagram(a[15300:20400, :].reshape(51, 100, 100),
                        a[20400:25500, :].reshape(51, 100, 100),
                        a[25500:30600, :].reshape(51, 100, 100), hr_type='Tg')


def _hrTTG(path):
    """
    Load one HR diagram (T/TG type)
    """
    a = np.loadtxt(path)
    return hr.HRDiagram(a[30600:35700, :].reshape(51, 100, 100),
                        a[35700:40800, :].reshape(51, 100, 100),
                        a[40800:, :].reshape(51, 100, 100), hr_type='TTG')


def _sed(path):
    """
    Load one SED file
    """
    return pd.read_csv(path, sep=r"\s+", engine='python',
                       names=['WL', '6.0', '6.1', '6.2', '6.3', '6.4', '6.5', '6.6', '6.7', '6.8',
                              '6.9', '7.0', '7.1', '7.2', '7.3', '7.4', '7.5', '7.6', '7.7', '7.8',
                              '7.9', '8.0', '8.1', '8.2', '8.3', '8.4', '8.5', '8.6', '8.7', '8.8',
                              '8.9', '9.0', '9.1', '9.2', '9.3', '9.4', '9.5', '9.6', '9.7', '9.8',
                              '9.9', '10.0', '10.1', '10.2', '10.3', '10.4', '10.5', '10.6', '10.7',
                              '10.8', '10.9', '11.0'])


def _ionizing_flux(path):
    """
    Load one ionizing flux file
    """
    return pd.read_csv(path, sep=r'\s+', engine='python',
                       names=['log_age', 'prod_rate', 'halpha', 'FUV', 'NUV'])


def _colours(path):
    """
    Load one colour file
    """
    return pd.read_csv(path, sep=r'\s+', engine='python',
                       names=['log_age', 'V-I', 'U', 'B', 'V', 'R', 'I', 'J', 'H', 'K',
                              'u', 'g', 'r', 'i', 'z', 'f300w', 'f336w', 'f435w', 'f450w',
                              'f555w', 'f606w', 'f814w', 'prod_rate', 'halpha', 'FUV', 'NUV'])
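# Note on the reshapes above (describing only what the slicing implies): np.loadtxt
# returns a (45900, 100) array in which consecutive 5100-row blocks each reshape to
# (51, 100, 100), i.e. 51 log-age bins on a 100x100 grid. Rows 0-15299 hold the three
# blocks used for the TL diagram, rows 15300-30599 the Tg blocks, and rows 30600-45899
# the T/TG blocks; each triplet is handed to hr.HRDiagram in that order.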
##########################
# NEBULAR EMISSION LINES #
##########################

def nebular_lines(path):
    """
    Load the nebular line output information.

    Parameters
    ----------
    path : str
        Path to the nebular emission line output file.

    Returns
    -------
    `pandas.DataFrame` of the UV or optical emission line fluxes and equivalent widths.
    """
    assert isinstance(path, str), "HOKI ERROR: The location of the file is expected to be a string."
    assert os.path.isfile(path), f"File {path} does not exist, or its path is incorrect."

    if 'UV' in path:
        return _UV_nebular_lines(path)
    elif 'Optical' in path:
        return _optical_nebular_lines(path)


def _optical_nebular_lines(path):
    column_opt_em_lines = ['model_num', 'logU', 'log_nH', 'log_age',
                           'NII6548_F', 'NII6548_EW', 'NII6584_F', 'NII6584_EW',
                           'SiII6716_F', 'SiII6716_EW', 'SiII6731_F', 'SiII6731_EW',
                           'OI6300_F', 'OI6300_EW', 'OIII4959_F', 'OIII4959_EW',
                           'OIII5007_F', 'OIII5007_EW', 'Halpha_F', 'Halpha_EW',
                           'Hbeta_F', 'Hbeta_EW', 'HeI4686_F', 'HeI4686_EW']
    return pd.read_csv(path, skiprows=1, sep=r'\s+', engine='python', names=column_opt_em_lines)


def _UV_nebular_lines(path):
    column_UV_em_lines = ['model_num', 'logU', 'log_nH', 'log_age',
                          'HeII1640_F', 'HeII1640_EW', 'CIII1907_F', 'CIII1907_EW',
                          'CIII1910_F', 'CIII1910_EW', 'CIV1548_F', 'CIV1548_EW',
                          'CIV1551_F', 'CIV1551_EW', 'OI1357_F', 'OI1357_EW',
                          'OIII1661_F', 'OIII1661_EW', 'OIII1666_F', 'OIII1666_EW',
                          'SiII1263_F', 'SiII1263_EW', 'SiIII1308_F', 'SiIII1308_EW',
                          'SiII1531_F', 'SiII1531_EW']
    return pd.read_csv(path, skiprows=1, sep=r'\s+', engine='python', names=column_UV_em_lines)
#####################################
# BPASS Load over all metallicity   #
#####################################

def rates_all_z(data_path, imf, binary=True):
    """
    Loads the BPASS supernova event files.

    Notes
    -----
    The rates are just read from file and not normalised.

    Input
    -----
    data_path : `str`
        The path to the folder containing the BPASS files.

    binary : `bool`
        Use the binary files or just the single stars. Default=True

    imf : `str`
        BPASS identifier of the IMF to be used.
        The accepted IMF identifiers are:
        `"imf_chab100"`, `"imf_chab300"`, `"imf100_100"`, `"imf100_300"`, `"imf135_100"`,
        `"imf135_300"`, `"imfall_300"`, `"imf170_100"`, `"imf170_300"`

    Returns
    -------
    `pandas.DataFrame` (51, (8, 13)) (log_age, (event_types, metallicity))
        A pandas MultiIndex dataframe containing the BPASS number of events
        per metallicity per type.
        Usage:   rates.loc[log_age, (type, metallicity)]
        Example: rates.loc[6.5, ("Ia", 0.02)]

    Notes
    -----
    This dataframe has the following structure.
    The index is the log_age as a float.
    The column is a `pandas.MultiIndex` with the event types (level=0, `str`)
    and the metallicity (level=1, `float`).

    |Event Type | Ia      | IIP     | ... | PISNe | low_mass |
    |Metallicity| 0.00001 | 0.00001 | ... | 0.04  | 0.04     |
    | log_age   |---------|---------|-----|-------|----------|
    |    6.0    |                                            |
    |    ...    |            Event Rate values               |
    |   11.0    |                                            |
    """

    # Check population type
    star = "bin" if binary else "sin"

    # check IMF key
    if imf not in BPASS_IMFS:
        raise HokiKeyError(f"{imf} is not a BPASS IMF. Please select a correct IMF.\n"
                           "These can be found in the documentation of this function.")

    # Create the output DataFrame
    arrays = [BPASS_NUM_METALLICITIES, BPASS_EVENT_TYPES]
    columns = pd.MultiIndex.from_product(arrays, names=["Metallicity", "Event Type"])

    rates = pd.DataFrame(index=BPASS_TIME_BINS, columns=columns, dtype=np.float64)
    rates.index.name = "log_age"

    # load supernova count files
    for num, metallicity in enumerate(BPASS_METALLICITIES):
        data = model_output(f"{data_path}/supernova-{star}-{imf}.{metallicity}.dat")
        data = data.loc[:, slice(BPASS_EVENT_TYPES[0], BPASS_EVENT_TYPES[-1])]
        rates.loc[:, (BPASS_NUM_METALLICITIES[num], slice(None))] = data.to_numpy()

    # swap metallicity and event type levels so the event types come first
    return rates.swaplevel(0, 1, axis=1)
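# Sketch of rates_all_z in use, assuming the supernova-*.dat files live in a local
# folder (the folder name is hypothetical):
#
#   rates = rates_all_z("./bpass_v2.2_hoki", imf="imf135_300", binary=True)
#   ia_rate = rates.loc[6.5, ("Ia", 0.02)]        # one event type at one metallicity
#   all_ia = rates.loc[:, ("Ia", slice(None))]    # one event type across all metallicities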
def spectra_all_z(data_path, imf, binary=True):
    """
    Load all BPASS spectra from files.

    Notes
    -----
    The first time this function is run on a folder it will generate a compiled file
    containing all the BPASS spectra per metallicity for faster loading in the future.
    It stores the file in the same folder with the name: `all_spectra-[bin/sin]-[imf].npy`
    The spectra are just read from file and not normalised.

    Input
    -----
    data_path : `str`
        The path to the folder containing the BPASS spectra.

    binary : `bool`
        Use the binary files or just the single stars. Default=True

    imf : `str`
        BPASS identifier of the IMF to be used.
        The accepted IMF identifiers are:
        `"imf_chab100"`, `"imf_chab300"`, `"imf100_100"`, `"imf100_300"`, `"imf135_100"`,
        `"imf135_300"`, `"imfall_300"`, `"imf170_100"`, `"imf170_300"`

    Returns
    -------
    spectra : `numpy.ndarray` (13, 51, 100000) [(metallicity, log_age, wavelength)]
        A 3D numpy array containing all the BPASS spectra for a specific imf and
        binary or single star population.
        Usage: spectra[1][2][1000]
               (gives L_\\odot for Z=0.0001 and log_age=6.2 at 999 Angstrom)
    """
    # Check population type
    star = "bin" if binary else "sin"

    # Check if the given IMF is in the accepted IMFs
    if imf not in BPASS_IMFS:
        raise HokiKeyError(f"{imf} is not a BPASS IMF. Please select a correct IMF.")

    # check if data_path is a string
    if not isinstance(data_path, str):
        raise HokiTypeError("The folder location is expected to be a string.")

    # check if compiled file exists
    if os.path.isfile(f"{data_path}/all_spectra-{star}-{imf}.npy"):
        print("Loading precompiled file.")
        spectra = np.load(f"{data_path}/all_spectra-{star}-{imf}.npy")
        print("Done Loading.")
    # Otherwise compile
    else:
        print("Compiled file not found. Data will be compiled.")
        spec = hoki.data_compilers.SpectraCompiler(
            data_path, data_path, imf, binary=binary
        )
        spectra = spec.output
    return spectra
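# Sketch of spectra_all_z in use (the folder name is hypothetical). The axis order is
# (metallicity, log_age, wavelength), so spectra[1][2] is the full SED of the Z = 0.0001
# bin at log_age = 6.2, following the docstring's indexing:
#
#   spectra = spectra_all_z("./bpass_v2.2_hoki", imf="imf135_300", binary=True)
#   sed = spectra[1][2]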
Available", "Notes ----- The first time this function is run on a folder it", "return _hrTg(path) elif \"hrs\" in path and hr_type == 'TTG': return _hrTTG(path) else:", "return emissivities ################# # # ################# def _do_not_use(): import webbrowser url = \"https://www.youtube.com/watch?v=dQw4w9WgXcQ\"", "Returns ------- \"\"\" assert isinstance(path, str), \"The location of the file is expected", "= [] mixedage = [] initialBH = [] initialP = [] # This", "'He_sn', 'Z_sn'], engine='python') def _stellar_masses(path): \"\"\" Load One stellar masses file into a", "------- \"\"\" assert isinstance(path, str), \"HOKI ERROR: The location of the file is", "means, consult the BPASS manual if int(elements[1]) < 2: # In this case", "elif int(elements[1]) == 4: # If the type is 4 we need all", "rates = pd.DataFrame(index=BPASS_TIME_BINS, columns=columns, dtype=np.float64) rates.index.name = \"log_age\" # load supernova count files", "specific imf and binary or single star population. Usage: spectra[1][2][0] (gives Nion for", "_yields(path) elif \"starmass\" in path: return _stellar_masses(path) elif \"spectra\" in path: return _sed(path)", "The path provided does not correspond to a valid directory' path_to_settings = data_path+'/settings.yaml'", "'Z_sn'], engine='python') def _stellar_masses(path): \"\"\" Load One stellar masses file into a dataframe", "Notes ----- You are going to have to reload hoki for your new", "'10.4', '10.5', '10.6', '10.7', '10.8', '10.9', '11.0']) def _ionizing_flux(path): \"\"\" Load One ionizing", "file containing all the BPASS emissivities for faster loading in the future. It", "sure hr_type is set! Available options are: 'TL', 'Tg', 'TTG'. \") def _sn_rates(path):", "0.04 | | log_age |---------|----------|------|-------|----------| | 6.0 | | ... | Event Rate", "are NaNs and we go back to the # beginning to read a", "will be NaN # but we also need to read the next line", "'6.1', '6.2', '6.3', '6.4', '6.5', '6.6', '6.7', '6.8', '6.9', '7.0', '7.1', '7.2', '7.3',", "os.path.isdir(path), 'HOKI ERROR: The path provided does not correspond to a valid directory'", "files. binary : `bool` Use the binary files or just the single stars.", "set_models_path from hoki.load will fail\" warnings.warn(deprecation_msg, HokiDeprecationWarning) assert os.path.isdir(path), 'HOKI ERROR: The path", "check if compiled file exists if os.path.isfile(f\"{data_path}/all_spectra-{star}-{imf}.npy\"): print(\"Loading precompiled file.\") spectra = np.load(f\"{data_path}/all_spectra-{star}-{imf}.npy\")", "The emissivities are just read from file and not normalised. Input ----- data_path", "- `\"imf135_300\"` - `\"imfall_300\"` - `\"imf170_100\"` - `\"imf170_300\"` Returns ------- emissivities : `numpy.ndarray`", "log_age=6.2) \"\"\" # Check population type star = \"bin\" if binary else \"sin\"", "binary=binary ) spectra = spec.output return spectra def emissivities_all_z(data_path, imf, binary=True): \"\"\" Load", "if compiled spectra are already present in data folder if os.path.isfile(f\"{data_path}/all_ionizing-{star}-{imf}.npy\"): print(\"Load precompiled", "\" \\ \"Available options are: 'TL', 'Tg', 'TTG'. \" if \"supernova\" in path:", "vector.. row = [0, 1, 0, 0] # ... and we skip the", "containing the BPASS spectra. 
binary : `bool` Use the binary files or just", "'Halpha_F', 'Halpha_EW', 'Hbeta_F', 'Hbeta_EW', 'HeI4686_F', 'HeI4686_EW'] return pd.read_csv(path, skiprows=1, sep=r'\\s+', engine='python', names=column_opt_em_lines) def", "FUNCTIONS # ########################## def model_output(path, hr_type=None): \"\"\" Loads a BPASS output file Parameters", "path: return _colours(path) elif \"hrs\" in path and hr_type == 'TL': return _hrTL(path)", "the BPASS emissivities for faster loading in the future. It stores the file", "allow people to chose to load the data into a numpy arrays as", "well! You can check the path was correctly updated by looking at this", "in an array of shape (45900,100) a = np.loadtxt(path) return hr.HRDiagram(a[0:5100,:].reshape(51,100,100), a[5100:10200,:].reshape(51,100,100), a[10200:15300,:].reshape(51,100,100),", "pickle.load(open(path, 'rb')) # TODO: Deprecation warning def set_models_path(path): \"\"\" Changes the path to", "| | log_age |---------|----------|------|-------|----------| | 6.0 | | ... | Event Rate values", "return pd.read_csv(path, sep=r'\\s+', engine='python', names=['log_age', 'V-I', 'U', 'B', 'V', 'R', 'I', 'J', 'H',", "to the folder containing the BPASS files. binary : `bool` Use the binary", "low_mass | |Metallicity| 0.00001 | 0.00001 | ... | 0.04 | 0.04 |", "= np.loadtxt(path) return hr.HRDiagram(a[30600:35700,:].reshape(51,100,100), a[35700:40800,:].reshape(51,100,100), a[40800:,:].reshape(51,100,100), hr_type='TTG') def _sed(path): \"\"\" Load One SED", "of type # To know what each value means, consult the BPASS manual", "for a specific imf and binary or single star population. Usage: spectra[1][2][1000] (gives", "LOAD DUMMY VARIABLE # ######################## def dummy_to_dataframe(filename, bpass_version=DEFAULT_BPASS_VERSION): \"\"\"Reads in dummy to df", "line. Using .split is not possible/convenient # The vector will tell use what", "sep=r'\\s+', engine='python', names=['log_age', 'V-I', 'U', 'B', 'V', 'R', 'I', 'J', 'H', 'K', 'u',", "`\"imfall_300\"` - `\"imf170_100\"` - `\"imf170_300\"` Returns ------- `pandas.DataFrame` (51, (8,13)) (log_age, (event_types, metallicity)", "`pandas.DataFrame` (51, (8,13)) (log_age, (event_types, metallicity) A pandas MultiIndex dataframe containing the BPASS", "\"\"\" Changes the path to the stellar models in hoki's settings Parameters ----------", "in path: return _stellar_numbers(path) elif \"yields\" in path: return _yields(path) elif \"starmass\" in", "'7.0', '7.1', '7.2', '7.3', '7.4', '7.5', '7.6', '7.7', '7.8', '7.9', '8.0', '8.1', '8.2',", "dummy_to_dataframe(filename, bpass_version=DEFAULT_BPASS_VERSION): \"\"\"Reads in dummy to df from a filename\"\"\" inv_dict ={v: k", "Type of HR diagram to load: 'TL', 'Tg' or 'TTG'. Returns ------- Output", "return _stellar_numbers(path) elif \"yields\" in path: return _yields(path) elif \"starmass\" in path: return", "in lines[1:]: # This line contains the filename. if row[0]: filenames.append(l) # The", "\"\\nDEBUGGING ASSISTANT:\\n1) Is the filename correct?\" \"\\n2) Trying to load an HR diagram?", "Load One ionizing flux file \"\"\" return pd.read_csv(path, sep=r'\\s+', engine='python', names=['log_age', 'prod_rate', 'halpha',", "\"log_age\" # load supernova count files for num, metallicity in enumerate(BPASS_METALLICITIES): data =", "vector to either go back to reading a filename or to probe those", "`\"imf135_100\"` - `\"imf135_300\"` - `\"imfall_300\"` - `\"imf170_100\"` - `\"imf170_300\"` Returns ------- emissivities :", "strings and numbers. 
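
# --------------------------------------------------------------------------
# Illustrative sketch (not part of the original loader): a minimal, made-up
# input file showing how `model_input` walks the rows described in the
# comments above. The model names and numbers below are invented for the
# example; real BPASS input files follow the layout in the BPASS manual.
# --------------------------------------------------------------------------
def _example_model_input_usage(tmp_file="example_bpass_input.txt"):
    """Write a tiny synthetic input file and parse it with `model_input`."""
    content = ("2\n"                                    # first line is skipped by the parser
               "NEWSINMODS/z020/sneplot-z020-10\n"      # row a: filename
               "0.005 0\n"                              # row b: imf probability + type < 2
               "NEWBINMODS/z020/sneplot-z020-10-0.1\n"  # row a: filename of a second model
               "0.003 2\n"                              # row b: type 2 -> one more row follows
               "0.001 7.0\n")                           # row c: mixed imf + mixed age
    with open(tmp_file, "w") as f:
        f.write(content)
    return model_input(tmp_file)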

##########################
# MODEL OUTPUT FUNCTIONS #
##########################

def model_output(path, hr_type=None):
    """
    Loads a BPASS output file

    Parameters
    ----------
    path : str
        Path to the file containing the target data.
    hr_type : str, optional
        Type of HR diagram to load: 'TL', 'Tg' or 'TTG'.

    Returns
    -------
    Output Data : pandas.DataFrame or hoki.hrdiagrams.HRDiagrams object
    """
    assert isinstance(path, str), "HOKI ERROR: The location of the file is expected to be a string."
    assert os.path.isfile(path), "HOKI ERROR: This file does not exist, or its path is incorrect."
    assert hr_type in [None, 'TL', 'Tg', 'TTG'], "HOKI ERROR: The HR diagram type is invalid. " \
                                                 "Available options are: 'TL', 'Tg', 'TTG'. "

    if "supernova" in path:
        return _sn_rates(path)
    elif "numbers" in path:
        return _stellar_numbers(path)
    elif "yields" in path:
        return _yields(path)
    elif "starmass" in path:
        return _stellar_masses(path)
    elif "spectra" in path:
        return _sed(path)
    elif "ioniz" in path:
        return _ionizing_flux(path)
    elif "colour" in path:
        return _colours(path)
    elif "hrs" in path and hr_type == 'TL':
        return _hrTL(path)
    elif "hrs" in path and hr_type == 'Tg':
        return _hrTg(path)
    elif "hrs" in path and hr_type == 'TTG':
        return _hrTTG(path)
    else:
        print("HOKI ERROR -- Could not load the Stellar Population output. "
              "\nDEBUGGING ASSISTANT:\n1) Is the filename correct?"
              "\n2) Trying to load an HR diagram? Make sure hr_type is set! "
              "Available options are: 'TL', 'Tg', 'TTG'. ")


def _sn_rates(path):
    """ Loads One Supernova rate file into a dataframe """
    return pd.read_csv(path, sep=r"\s+",
                       names=['log_age', 'Ia', 'IIP', 'II', 'Ib', 'Ic', 'LGRB', 'PISNe', 'low_mass',
                              'e_Ia', 'e_IIP', 'e_II', 'e_Ib', 'e_Ic', 'e_LGRB', 'e_PISNe',
                              'e_low_mass', 'age_yrs'],
                       engine='python')


def _stellar_numbers(path):
    """ Load One stellar type number file into a dataframe """
    return pd.read_csv(path, sep=r"\s+",
                       names=['log_age', 'O_hL', 'Of_hL', 'B_hL', 'A_hL', 'YSG_hL', 'K_hL', 'M_hL',
                              'WNH_hL', 'WN_hL', 'WC_hL', 'O_lL', 'Of_lL', 'B_lL', 'A_lL', 'YSG_lL',
                              'K_lL', 'M_lL', 'WNH_lL', 'WN_lL', 'WC_lL'],
                       engine='python')


def _yields(path):
    """ Load One yields file into a dataframe """
    return pd.read_csv(path, sep=r"\s+",
                       names=['log_age', 'H_wind', 'He_wind', 'Z_wind', 'E_wind', 'E_sn',
                              'H_sn', 'He_sn', 'Z_sn'],
                       engine='python')


def _stellar_masses(path):
    """ Load One stellar masses file into a dataframe """
    return pd.read_csv(path, sep=r"\s+",
                       names=['log_age', 'stellar_mass', 'remnant_mass'],
                       engine='python')


def _hrTL(path):
    """ Load HR diagrams (TL type) """
    # 'a' is just a placeholder which contains the whole file in an array of shape (45900, 100)
    a = np.loadtxt(path)
    return hr.HRDiagram(a[0:5100, :].reshape(51, 100, 100),
                        a[5100:10200, :].reshape(51, 100, 100),
                        a[10200:15300, :].reshape(51, 100, 100), hr_type='TL')


def _hrTg(path):
    """ Load One HR diagrams (Tg type) """
    a = np.loadtxt(path)
    return hr.HRDiagram(a[15300:20400, :].reshape(51, 100, 100),
                        a[20400:25500, :].reshape(51, 100, 100),
                        a[25500:30600, :].reshape(51, 100, 100), hr_type='Tg')


def _hrTTG(path):
    """ Load One HR diagrams (T/TG type) """
    a = np.loadtxt(path)
    return hr.HRDiagram(a[30600:35700, :].reshape(51, 100, 100),
                        a[35700:40800, :].reshape(51, 100, 100),
                        a[40800:, :].reshape(51, 100, 100), hr_type='TTG')


def _sed(path):
    """ Load One SED file """
    return pd.read_csv(path, sep=r"\s+", engine='python',
                       names=['WL', '6.0', '6.1', '6.2', '6.3', '6.4', '6.5', '6.6', '6.7', '6.8',
                              '6.9', '7.0', '7.1', '7.2', '7.3', '7.4', '7.5', '7.6', '7.7', '7.8',
                              '7.9', '8.0', '8.1', '8.2', '8.3', '8.4', '8.5', '8.6', '8.7', '8.8',
                              '8.9', '9.0', '9.1', '9.2', '9.3', '9.4', '9.5', '9.6', '9.7', '9.8',
                              '9.9', '10.0', '10.1', '10.2', '10.3', '10.4', '10.5', '10.6', '10.7',
                              '10.8', '10.9', '11.0'])


def _ionizing_flux(path):
    """ Load One ionizing flux file """
    return pd.read_csv(path, sep=r"\s+", engine='python',
                       names=['log_age', 'prod_rate', 'halpha', 'FUV', 'NUV'])


def _colours(path):
    """ Load One colour file """
    return pd.read_csv(path, sep=r"\s+", engine='python',
                       names=['log_age', 'V-I', 'U', 'B', 'V', 'R', 'I', 'J', 'H', 'K', 'u', 'g',
                              'r', 'i', 'z', 'f300w', 'f336w', 'f435w', 'f450w', 'f555w', 'f606w',
                              'f814w', 'prod_rate', 'halpha', 'FUV', 'NUV'])
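
# --------------------------------------------------------------------------
# Illustrative sketch (not from the original module): `model_output` picks the
# right parser from the filename, so the same call can load rates, spectra or
# HR diagrams. The paths below are placeholders for files in a local BPASS
# release, not files shipped with hoki.
# --------------------------------------------------------------------------
def _example_model_output_usage(data_folder="path/to/bpass_output"):
    """Sketch only: the filenames are assumed examples of BPASS output files."""
    sn_rates = model_output(f"{data_folder}/supernova-bin-imf135_300.z020.dat")
    hr_diagram = model_output(f"{data_folder}/hrs-bin-imf135_300.z020.dat", hr_type='TL')
    return sn_rates, hr_diagram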

##########################
# NEBULAR EMISSION LINES #
##########################

def nebular_lines(path):
    """
    Load the nebular line output information

    Parameters
    ----------
    path : str
        Path to the file containing the nebular line data.

    Returns
    -------
    pandas.DataFrame
    """
    assert isinstance(path, str), "The location of the file is expected to be a string."
    assert os.path.isfile(path), "HOKI ERROR: This file does not exist, or its path is incorrect."

    if 'UV' in path:
        return _UV_nebular_lines(path)
    elif 'Optical' in path:
        return _optical_nebular_lines(path)


def _optical_nebular_lines(path):
    column_opt_em_lines = ['model_num', 'logU', 'log_nH', 'log_age',
                           'NII6548_F', 'NII6548_EW', 'NII6584_F', 'NII6584_EW',
                           'SiII6716_F', 'SiII6716_EW', 'SiII6731_F', 'SiII6731_EW',
                           'OI6300_F', 'OI6300_EW',
                           # ... further optical line columns (truncated in the source) ...
                           'OIII4959_F', 'OIII4959_EW', 'OIII5007_F', 'OIII5007_EW',
                           'Halpha_F', 'Halpha_EW', 'Hbeta_F', 'Hbeta_EW',
                           'HeI4686_F', 'HeI4686_EW']
    return pd.read_csv(path, skiprows=1, sep=r'\s+', engine='python', names=column_opt_em_lines)


def _UV_nebular_lines(path):
    column_UV_em_lines = ['model_num', 'logU', 'log_nH', 'log_age',
                          'HeII1640_F', 'HeII1640_EW',
                          'CIII1907_F', 'CIII1907_EW', 'CIII1910_F', 'CIII1910_EW',
                          'CIV1548_F', 'CIV1548_EW', 'CIV1551_F', 'CIV1551_EW',
                          'OI1357_F', 'OI1357_EW',
                          'OIII1661_F', 'OIII1661_EW', 'OIII1666_F', 'OIII1666_EW',
                          'SiII1263_F', 'SiII1263_EW', 'SiIII1308_F', 'SiIII1308_EW',
                          'SiII1531_F', 'SiII1531_EW']
    return pd.read_csv(path, skiprows=1, sep=r'\s+', engine='python', names=column_UV_em_lines)
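
# --------------------------------------------------------------------------
# Illustrative sketch (not from the original module): `nebular_lines` picks the
# UV or Optical parser from the filename. The filenames are placeholders.
# --------------------------------------------------------------------------
def _example_nebular_lines_usage(data_folder="path/to/bpass_nebular"):
    """Sketch only: dispatch is based on 'UV' or 'Optical' appearing in the path."""
    uv_lines = nebular_lines(f"{data_folder}/UV_emission_lines.dat")
    optical_lines = nebular_lines(f"{data_folder}/Optical_emission_lines.dat")
    return uv_lines, optical_lines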

#####################################
# BPASS Load over all metallicity   #
#####################################

def rates_all_z(data_path, imf, binary=True):
    """
    Loads the BPASS supernova event files.

    Notes
    -----
    The rates are just read from file and not normalised.

    Input
    -----
    data_path : `str`
        The filepath to the folder containing the BPASS data
    binary : `bool`
        Use the binary files or just the single stars. Default=True
    imf : `str`
        BPASS Identifier of the IMF to be used.
        The accepted IMF identifiers are:
        - `"imf_chab100"`
        - `"imf_chab300"`
        - `"imf100_100"`
        - `"imf100_300"`
        - `"imf135_100"`
        - `"imf135_300"`
        - `"imfall_300"`
        - `"imf170_100"`
        - `"imf170_300"`

    Returns
    -------
    `pandas.DataFrame` (51, (8, 13)) (log_age, (event_types, metallicity))
        A pandas MultiIndex dataframe containing the BPASS number of events
        per metallicity per type.
        Usage: rates.loc[log_age, (type, metallicity)]
        Example: rates.loc[6.5, ("Ia", 0.02)]

    Notes
    -----
    This dataframe has the following structure.
    The index is the log_age as a float.
    The column is a `pandas.MultiIndex` with the event types (level=0, `float`)
    and the metallicity (level=1, `float`)

    |Event Type |   Ia    |   IIP    | ...  | PISNe | low_mass |
    |Metallicity| 0.00001 | 0.00001  | ...  | 0.04  |   0.04   |
    | log_age   |---------|----------|------|-------|----------|
    |    6.0    |                                              |
    |    ...    |              Event Rate values               |
    |   11.0    |                                              |
    """
    # Check population type
    star = "bin" if binary else "sin"

    # check IMF key
    if imf not in BPASS_IMFS:
        raise HokiKeyError(f"{imf} is not a BPASS IMF. Please select a correct IMF.\n"
                           "These can be found in the documentation of this function.")

    # Create the output DataFrame
    arrays = [BPASS_NUM_METALLICITIES, BPASS_EVENT_TYPES]
    columns = pd.MultiIndex.from_product(arrays, names=["Metallicity", "Event Type"])
    rates = pd.DataFrame(index=BPASS_TIME_BINS, columns=columns, dtype=np.float64)
    rates.index.name = "log_age"

    # load supernova count files
    for num, metallicity in enumerate(BPASS_METALLICITIES):
        data = model_output(f"{data_path}/supernova-{star}-{imf}.{metallicity}.dat")
        data = data.loc[:, slice(BPASS_EVENT_TYPES[0], BPASS_EVENT_TYPES[-1])]
        rates.loc[:, (BPASS_NUM_METALLICITIES[num], slice(None))] = data.to_numpy()

    # swap metallicity and event type
    return rates.swaplevel(0, 1, axis=1)
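
# --------------------------------------------------------------------------
# Illustrative sketch (not from the original module): how the MultiIndex frame
# returned by `rates_all_z` is queried, following the Usage/Example lines in
# its docstring. The folder name is a placeholder for a local BPASS release.
# --------------------------------------------------------------------------
def _example_rates_usage(bpass_folder="path/to/bpass_v2.2_imf135_300"):
    """Sketch only: returns the type Ia event rate at log_age = 6.5 and Z = 0.02."""
    rates = rates_all_z(bpass_folder, "imf135_300", binary=True)
    return rates.loc[6.5, ("Ia", 0.02)]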

def spectra_all_z(data_path, imf, binary=True):
    """
    Load all BPASS spectra from files.

    Notes
    -----
    The first time this function is run on a folder it will generate a compiled file
    containing all the BPASS spectra per metallicity for faster loading in the future.
    It stores the file in the same folder with the name: `all_spectra-[bin/sin]-[imf].npy`

    The spectra are just read from file and not normalised.

    Input
    -----
    data_path : `str`
        The path to the folder containing the BPASS spectra.
    binary : `bool`
        Use the binary files or just the single stars. Default=True
    imf : `str`
        BPASS Identifier of the IMF to be used.
        The accepted IMF identifiers are:
        - `"imf_chab100"`
        - `"imf_chab300"`
        - `"imf100_100"`
        - `"imf100_300"`
        - `"imf135_100"`
        - `"imf135_300"`
        - `"imfall_300"`
        - `"imf170_100"`
        - `"imf170_300"`

    Returns
    -------
    spectra : `numpy.ndarray` (13, 51, 100000) [(metallicity, log_age, wavelength)]
        A 3D numpy array containing all the BPASS spectra for a specific imf
        and binary or single star population.
        Usage: spectra[1][2][1000]
               (gives L_\\odot for Z=0.0001 and log_age=6.2 at 999 Angstrom)
    """
    # Check population type
    star = "bin" if binary else "sin"

    # check IMF key
    if imf not in BPASS_IMFS:
        raise HokiKeyError(f"{imf} is not a BPASS IMF. Please select a correct IMF.")

    # check if data_path is a string
    if not isinstance(data_path, str):
        raise HokiTypeError("The folder location is expected to be a string.")

    # check if compiled file exists
    if os.path.isfile(f"{data_path}/all_spectra-{star}-{imf}.npy"):
        print("Loading precompiled file.")
        spectra = np.load(f"{data_path}/all_spectra-{star}-{imf}.npy")
        print("Done Loading.")
    # Otherwise compile
    else:
        print("Compiled file not found. Data will be compiled")
        spec = hoki.data_compilers.SpectraCompiler(data_path, data_path, imf, binary=binary)
        spectra = spec.output
    return spectra
Notes ----- The rates are just read from file and not normalised.", "= [] mixedimf = [] mixedage = [] initialBH = [] initialP =", "names=['log_age', 'Ia', 'IIP', 'II', 'Ib', 'Ic', 'LGRB', 'PISNe', 'low_mass', 'e_Ia', 'e_IIP', 'e_II', 'e_Ib',", "`\"imf100_100\"` - `\"imf100_300\"` - `\"imf135_100\"` - `\"imf135_300\"` - `\"imfall_300\"` - `\"imf170_100\"` - `\"imf170_300\"`", "or single star population. Usage: spectra[1][2][1000] (gives L_\\\\odot for Z=0.0001 and log_age=6.2 at", "------- Output Data : pandas.DataFrame or hoki.hrdiagrams.HRDiagrams object \"\"\" assert isinstance(path, str), \"HOKI", "= data_path+'/settings.yaml' with open(path_to_settings, 'r') as stream: settings = yaml.safe_load(stream) settings['models_path'] = path", "Input ----- data_path : `str` The filepath to the folder containing the BPASS", "not load the Stellar Population output. \" \"\\nDEBUGGING ASSISTANT:\\n1) Is the filename correct?\"", "in the future. It stores the file in the same folder with the", "########################## def nebular_lines(path): \"\"\" Load the nebular line output information Parameters ---------- path", "`\"imf135_100\"` - `\"imf135_300\"` - `\"imfall_300\"` - `\"imf170_100\"` - `\"imf170_300\"` Returns ------- spectra :", "filenames = [] modelimfs = [] modeltypes = [] mixedimf = [] mixedage", "`\"imf170_300\"` Returns ------- emissivities : `numpy.ndarray` (N_Z, N_age, 4) [(metallicity, log_age, band)] A", "append them modeltypes.append(elements[1]) # The next step is decided according to the value", "'mixed_imf': float, 'mixed_age': float, 'initial_BH': float, 'initial_P': float}) return input_df ########################## # MODEL", "engine='python', names=['log_age', 'prod_rate', 'halpha', 'FUV', 'NUV']) def _colours(path): \"\"\" Load One colour file", "'Hbeta_F', 'Hbeta_EW', 'HeI4686_F', 'HeI4686_EW'] return pd.read_csv(path, skiprows=1, sep=r'\\s+', engine='python', names=column_opt_em_lines) def _UV_nebular_lines(path): column_UV_em_lines", "[1, 0, 0, 0] # All potential input parameters and filename # If", "elif \"hrs\" in path and hr_type == 'TTG': return _hrTTG(path) else: print(\"HOKI ERROR", "'Tg', 'TTG'], \"HOKI ERROR: The HR diagram type is invalid. \" \\ \"Available", "run on a folder it will generate an npy file containing all the", "0] mixedimf.append(0.0) mixedage.append(0.0) initialBH.append(np.nan) initialP.append(np.nan) continue elif int(elements[1]) != 4: # If type", "from one file and put them in a dataframe Parameters ---------- path :", "row = [0, 0, 1, 0] initialBH.append(np.nan) initialP.append(np.nan) continue elif int(elements[1]) == 4:", "or hoki.hrdiagrams.HRDiagrams object \"\"\" assert isinstance(path, str), \"HOKI ERROR: The location of the", "compiled file exists if os.path.isfile(f\"{data_path}/all_spectra-{star}-{imf}.npy\"): print(\"Loading precompiled file.\") spectra = np.load(f\"{data_path}/all_spectra-{star}-{imf}.npy\") print(\"Done Loading.\")", "load the Stellar Population output. 
\" \"\\nDEBUGGING ASSISTANT:\\n1) Is the filename correct?\" \"\\n2)", "to df from a filename\"\"\" inv_dict ={v: k for k, v in dummy_dicts[bpass_version].items()}", "str): raise HokiTypeError(\"The folder location is expected to be a string.\") # Check", "is ran on a folder it will generate a pickle file containing all", "number file into a dataframe \"\"\" return pd.read_csv(path, sep=r\"\\s+\", names=['log_age', 'O_hL', 'Of_hL', 'B_hL',", "_stellar_masses(path): \"\"\" Load One stellar masses file into a dataframe \"\"\" return pd.read_csv(path,", "in path: return _UV_nebular_lines(path) elif 'Optical' in path: return _optical_nebular_lines(path) def _optical_nebular_lines(path): column_opt_em_lines=['model_num',", "`pandas.MultiIndex` with the event types (level=0, `float`) and the metallicity (level=1, `float`) |Event", "because no more inputs are coming row = [1, 0, 0, 0] #", "(level=1, `float`) |Event Type | Ia | IIP | ... | PISNe |", "_optical_nebular_lines(path) def _optical_nebular_lines(path): column_opt_em_lines=['model_num', 'logU', 'log_nH', 'log_age', 'NII6548_F', 'NII6548_EW', 'NII6584_F', 'NII6584_EW', 'SiII6716_F', 'SiII6716_EW',", "# This goes through the file line by line. Using .split is not", "filename or to probe those inputs. if not row[3]: row = [1, 0,", "a dataframe \"\"\" return pd.read_csv(path, sep=r\"\\s+\", names=['log_age', 'H_wind', 'He_wind', 'Z_wind', 'E_wind', 'E_sn', 'H_sn',", "f\"{data_path}/supernova-{star}-{imf}.{metallicity}.dat\" ) data = data.loc[:, slice(BPASS_EVENT_TYPES[0], BPASS_EVENT_TYPES[-1])] rates.loc[:, (BPASS_NUM_METALLICITIES[num], slice(None))] = data.to_numpy() #", "as pd import hoki.hrdiagrams as hr from hoki.constants import * import os import", "data = data.loc[:, slice(BPASS_EVENT_TYPES[0], BPASS_EVENT_TYPES[-1])] rates.loc[:, (BPASS_NUM_METALLICITIES[num], slice(None))] = data.to_numpy() # swap metallicity", ": pandas.DataFrame or hoki.hrdiagrams.HRDiagrams object \"\"\" assert isinstance(path, str), \"HOKI ERROR: The location", "also need to read the next line so we set the vector accordingly.", "- `\"imf135_100\"` - `\"imf135_300\"` - `\"imfall_300\"` - `\"imf170_100\"` - `\"imf170_300\"` Returns ------- spectra", "yaml.dump(settings, outfile, default_flow_style=False, allow_unicode=True) print('Looks like everything went well! You can check the", "return hr.HRDiagram(a[30600:35700,:].reshape(51,100,100), a[35700:40800,:].reshape(51,100,100), a[40800:,:].reshape(51,100,100), hr_type='TTG') def _sed(path): \"\"\" Load One SED file \"\"\"", "Check population type star = \"bin\" if binary else \"sin\" # Check if", "accepted IMF identifiers are: - `\"imf_chab100\"` - `\"imf_chab300\"` - `\"imf100_100\"` - `\"imf100_300\"` -", "with the event types (level=0, `float`) and the metallicity (level=1, `float`) |Event Type", "data.to_numpy() # swap metallicity and event type return rates.swaplevel(0, 1, axis=1) def spectra_all_z(data_path,", "specific imf and binary or single star population. 
Usage: spectra[1][2][1000] (gives L_\\\\odot for", "return _sed(path) elif \"ioniz\" in path: return _ionizing_flux(path) elif \"colour\" in path: return", "band)] A 3D numpy array containing all the BPASS emissivities (Nion [1/s], L_Halpha", "One SED file \"\"\" return pd.read_csv(path, sep=r\"\\s+\", engine='python', names=['WL', '6.0', '6.1', '6.2', '6.3',", "accepted IMFs if imf not in BPASS_IMFS: raise HokiKeyError( f\"{imf} is not a", "100000) [(metallicity, log_age, wavelength)] A 3D numpy array containing all the BPASS spectra", "the name: `all_spectra-[bin/sin]-[imf].pkl` The spectra are just read from file and not normalised.", "\"yields\" in path: return _yields(path) elif \"starmass\" in path: return _stellar_masses(path) elif \"spectra\"", "the rest to read in the next line. continue # This line contains", "\"\"\" return pd.read_csv(path, sep=r\"\\s+\", names=['log_age', 'Ia', 'IIP', 'II', 'Ib', 'Ic', 'LGRB', 'PISNe', 'low_mass',", "models in hoki's settings Parameters ---------- path : str, Absolute path to the", "diagrams (TL type) \"\"\" # 'a' is just a place order which contains", "\"spectra\" in path: return _sed(path) elif \"ioniz\" in path: return _ionizing_flux(path) elif \"colour\"", "types (level=0, `float`) and the metallicity (level=1, `float`) |Event Type | Ia |", "no more inputs are coming row = [1, 0, 0, 0] # Once", "the same folder with the name: `all_spectra-[bin/sin]-[imf].pkl` The spectra are just read from", "Otherwise compile else: print(\"Compiled file not found. Data will be compiled\") spec =", "'8.4', '8.5', '8.6', '8.7', '8.8', '8.9', '9.0', '9.1', '9.2', '9.3', '9.4', '9.5', '9.6',", "######################## def unpickle(path): \"\"\"Extract pickle files\"\"\" assert os.path.isfile(path), 'File not found.' return pickle.load(open(path,", "elif \"numbers\" in path: return _stellar_numbers(path) elif \"yields\" in path: return _yields(path) elif", "'CIV1551_F', 'CIV1551_EW', 'OI1357_F', 'OI1357_EW', 'OIII1661_F', 'OIII1661_EW', 'OIII1666_F', 'OIII1666_EW', 'SiII1263_F', 'SiII1263_EW', 'SiIII1308_F', 'SiIII1308_EW', 'SiII1531_F',", "in path: return _sed(path) elif \"ioniz\" in path: return _ionizing_flux(path) elif \"colour\" in", "be a string.\" assert os.path.isfile(path), \"HOKI: ERROR This file does not exist, or", "be a string.\") # Check if compiled spectra are already present in data", "check the path was correctly updated by looking at this file:' '\\n'+path_to_settings) ########################", "names=column_opt_em_lines) def _UV_nebular_lines(path): column_UV_em_lines = ['model_num', 'logU', 'log_nH', 'log_age', 'HeII1640_F', 'HeII1640_EW', 'CIII1907_F', 'CIII1907_EW',", "hoki.constants import * import os import yaml import io import pickle import pkg_resources", "file into a dataframe \"\"\" return pd.read_csv(path, sep=r\"\\s+\", names=['log_age', 'Ia', 'IIP', 'II', 'Ib',", "a[20400:25500,:].reshape(51,100,100), a[25500:30600,:].reshape(51,100,100), hr_type='Tg') def _hrTTG(path): \"\"\" Load One HR diagrams (T/TG type) \"\"\"", "to strings and numbers. 
input_df = pd.DataFrame.from_dict({'filenames': filenames[:-1], 'model_imf': modelimfs, 'types': modeltypes, 'mixed_imf':", "pd.MultiIndex.from_product( arrays, names=[\"Metallicicty\", \"Event Type\"]) rates = pd.DataFrame(index=BPASS_TIME_BINS, columns=columns, dtype=np.float64) rates.index.name = \"log_age\"", "pkg_resources.resource_filename('hoki', 'data') ######################## # GENERAL LOAD HELPERS # ######################## def unpickle(path): \"\"\"Extract pickle", "`\"imf135_300\"` - `\"imfall_300\"` - `\"imf170_100\"` - `\"imf170_300\"` Returns ------- `pandas.DataFrame` (51, (8,13)) (log_age,", "We set the vector accordingly. row = [0, 0, 1, 1] continue elif", "is in the accepted IMFs if imf not in BPASS_IMFS: raise HokiKeyError( f\"{imf}", "hoki.hrdiagrams.HRDiagrams object \"\"\" assert isinstance(path, str), \"HOKI ERROR: The location of the file", "If there is no corresponding value for a particular model, will append a", "sep=r\"\\s+\", engine='python', names=['WL', '6.0', '6.1', '6.2', '6.3', '6.4', '6.5', '6.6', '6.7', '6.8', '6.9',", "######################### # MODEL INPUT FUNCTIONS # ######################### def model_input(path): \"\"\" Loads inputs from", "row[3]: # This reads the last possible pair of inputs and puts them", "\"colour\" in path: return _colours(path) elif \"hrs\" in path and hr_type == 'TL':", "def _yields(path): \"\"\" Load One yields file into a dataframe \"\"\" return pd.read_csv(path,", "rates.swaplevel(0, 1, axis=1) def spectra_all_z(data_path, imf, binary=True): \"\"\" Load all BPASS spectra from", "'CIII1910_F', 'CIII1910_EW', 'CIV1548_F', 'CIV1548_EW', 'CIV1551_F', 'CIV1551_EW', 'OI1357_F', 'OI1357_EW', 'OIII1661_F', 'OIII1661_EW', 'OIII1666_F', 'OIII1666_EW', 'SiII1263_F',", "the stellar models in hoki's settings Parameters ---------- path : str, Absolute path", "'HOKI ERROR: The path provided does not correspond to a valid directory' path_to_settings", "os.path.isfile(path), \"HOKI: ERROR This file does not exist, or its path is incorrect.\"", "continue elif row[2]: # Splitting the two values and putting them in their", "rightful lists. elements = l.split() initialBH.append(elements[0]) initialP.append(elements[0]) # We then reset the vector", "contains the filename. if row[0]: filenames.append(l) # The next line will contain the", "stars. Default=True imf : `str` BPASS Identifier of the IMF to be used.", "not normalised. Input ----- data_path : `str` The path to the folder containing", "dummy = pd.read_csv(filename, names=cols, sep=r\"\\s+\", engine='python') return dummy ######################### # MODEL INPUT FUNCTIONS", "0] # ... and we skip the rest to read in the next", "to read in the next line. 
continue # This line contains the imf", "the whole file in an array of shape (45900,100) a = np.loadtxt(path) return", "file.\") emissivities = np.load(f\"{data_path}/all_ionizing-{star}-{imf}.npy\") print(\"Done Loading.\") # Compile the spectra for faster reading", ": str, optional Type of HR diagram to load: 'TL', 'Tg' or 'TTG'.", "import yaml import io import pickle import pkg_resources import hoki.data_compilers import warnings from", "'low_mass', 'e_Ia', 'e_IIP', 'e_II', 'e_Ib', 'e_Ic', 'e_LGRB', 'e_PISNe', 'e_low_mass', 'age_yrs'], engine='python') def _stellar_numbers(path):", "This line contains the imf probability and the type elif row[1]: elements =", "'SiIII1308_EW', 'SiII1531_F', 'SiII1531_EW'] return pd.read_csv(path, skiprows=1, sep=r'\\s+', engine='python', names=column_UV_em_lines) ##################################### # BPASS Load", "\"numbers\" in path: return _stellar_numbers(path) elif \"yields\" in path: return _yields(path) elif \"starmass\"", "HR diagram type is invalid. \" \\ \"Available options are: 'TL', 'Tg', 'TTG'.", "\"ioniz\" in path: return _ionizing_flux(path) elif \"colour\" in path: return _colours(path) elif \"hrs\"", "PISNe | low_mass | |Metallicity| 0.00001 | 0.00001 | ... | 0.04 |", "|Event Type | Ia | IIP | ... | PISNe | low_mass |", "Parameters ---------- path Returns ------- \"\"\" assert isinstance(path, str), \"HOKI ERROR: The location", "and log_age=6.2 at 999 Angstrom) \"\"\" # Check population type star = \"bin\"", "model, will append a NaN filenames = [] modelimfs = [] modeltypes =", "'9.0', '9.1', '9.2', '9.3', '9.4', '9.5', '9.6', '9.7', '9.8', '9.9', '10.0', '10.1', '10.2',", "def _hrTTG(path): \"\"\" Load One HR diagrams (T/TG type) \"\"\" a = np.loadtxt(path)", "\"\"\" Load all BPASS spectra from files. Notes ----- The first time this", "the future. It stores the file in the same folder with the name:", "invalid. \" \\ \"Available options are: 'TL', 'Tg', 'TTG'. \" if \"supernova\" in", "`\"imf170_100\"` - `\"imf170_300\"` Returns ------- `pandas.DataFrame` (51, (8,13)) (log_age, (event_types, metallicity) A pandas", "take effect. 
\"\"\" deprecation_msg = \"set_models_path has been moved to the hoki.constants module", "path: return _optical_nebular_lines(path) def _optical_nebular_lines(path): column_opt_em_lines=['model_num', 'logU', 'log_nH', 'log_age', 'NII6548_F', 'NII6548_EW', 'NII6584_F', 'NII6584_EW',", "rates.loc[log_age, (type, metallicity)] Example: rates.loc[6.5, (\"Ia\", 0.02)] Notes ----- This dataframe has the", "'a' is just a place order which contains the whole file in an", "mixedimf, 'mixed_age': mixedage, 'initial_BH': initialBH, 'initial_P': initialP}).astype({'filenames': str, 'model_imf': float, 'types': int, 'mixed_imf':", "\"HOKI: ERROR This file does not exist, or its path is incorrect.\" if", "file does not exist, or its path is incorrect.\" if 'UV' in path:", "-- In future versions of hoki\" \\ \"calling set_models_path from hoki.load will fail\"", "in path: return _optical_nebular_lines(path) def _optical_nebular_lines(path): column_opt_em_lines=['model_num', 'logU', 'log_nH', 'log_age', 'NII6548_F', 'NII6548_EW', 'NII6584_F',", "BPASS_EVENT_TYPES] columns = pd.MultiIndex.from_product( arrays, names=[\"Metallicicty\", \"Event Type\"]) rates = pd.DataFrame(index=BPASS_TIME_BINS, columns=columns, dtype=np.float64)", "[0, 0, 1, 0] initialBH.append(np.nan) initialP.append(np.nan) continue elif int(elements[1]) == 4: # If", "path: return _stellar_numbers(path) elif \"yields\" in path: return _yields(path) elif \"starmass\" in path:", "\"sin\" # Check if the given IMF is in the accepted IMFs if", "- `\"imf170_300\"` Returns ------- `pandas.DataFrame` (51, (8,13)) (log_age, (event_types, metallicity) A pandas MultiIndex", "a filename or to probe those inputs. if not row[3]: row = [1,", "the imf probability and the type elif row[1]: elements = l.split() # we", "\"\"\" # 'a' is just a place order which contains the whole file", "Splitting the two values and putting them in their respective lists elements =", "a[25500:30600,:].reshape(51,100,100), hr_type='Tg') def _hrTTG(path): \"\"\" Load One HR diagrams (T/TG type) \"\"\" a", "return pickle.load(open(path, 'rb')) # TODO: Deprecation warning def set_models_path(path): \"\"\" Changes the path", "hr_type='TTG') def _sed(path): \"\"\" Load One SED file \"\"\" return pd.read_csv(path, sep=r\"\\s+\", engine='python',", "the name: `all_ionizing-[bin/sin]-[imf].npy`. The emissivities are just read from file and not normalised.", "containing all the BPASS spectra for a specific imf and binary or single", "a folder it will generate an npy file containing all the BPASS emissivities", "# LOAD DUMMY VARIABLE # ######################## def dummy_to_dataframe(filename, bpass_version=DEFAULT_BPASS_VERSION): \"\"\"Reads in dummy to", "then reset the vector to be reading in filenames because no more inputs", "if not isinstance(data_path, str): raise HokiTypeError(\"The folder location is expected to be a", "# This line contains the imf probability and the type elif row[1]: elements", "dataframe Parameters ---------- path : str Path to the file containing the input", "inputs. 
if not row[3]: row = [1, 0, 0, 0] if row[3]: row[2]", "######################## def dummy_to_dataframe(filename, bpass_version=DEFAULT_BPASS_VERSION): \"\"\"Reads in dummy to df from a filename\"\"\" inv_dict", "engine='python', names=column_UV_em_lines) ##################################### # BPASS Load over all metallicity # ##################################### def rates_all_z(data_path,", "this function is ran on a folder it will generate a pickle file", "a dataframe \"\"\" return pd.read_csv(path, sep=r\"\\s+\", names=['log_age', 'stellar_mass', 'remnant_mass'], engine='python') def _hrTL(path): \"\"\"", "| 11.0 | \"\"\" # Check population type star = \"bin\" if binary", "\"\"\" Load One colour file \"\"\" return pd.read_csv(path, sep=r'\\s+', engine='python', names=['log_age', 'V-I', 'U',", "spectra def emissivities_all_z(data_path, imf, binary=True): \"\"\" Load all BPASS emissivities from files. Notes", "----- You are going to have to reload hoki for your new path", "the folder containing the BPASS spectra. binary : `bool` Use the binary files", "# Compile the spectra for faster reading next time otherwise else: print(\"Compiled file", "filenames[:-1], 'model_imf': modelimfs, 'types': modeltypes, 'mixed_imf': mixedimf, 'mixed_age': mixedage, 'initial_BH': initialBH, 'initial_P': initialP}).astype({'filenames':", "type) \"\"\" a = np.loadtxt(path) return hr.HRDiagram(a[30600:35700,:].reshape(51,100,100), a[35700:40800,:].reshape(51,100,100), a[40800:,:].reshape(51,100,100), hr_type='TTG') def _sed(path): \"\"\"", "and 'NEWSINMODS'. Notes ----- You are going to have to reload hoki for", "'e_low_mass', 'age_yrs'], engine='python') def _stellar_numbers(path): \"\"\" Load One stellar type number file into", "a pickle file containing all the BPASS spectra per metallicity for faster loading", "reset the vector.. row = [0, 1, 0, 0] # ... and we", "np.loadtxt(path) return hr.HRDiagram(a[30600:35700,:].reshape(51,100,100), a[35700:40800,:].reshape(51,100,100), a[40800:,:].reshape(51,100,100), hr_type='TTG') def _sed(path): \"\"\" Load One SED file", "data_path, data_path, imf, binary=binary ) spectra = spec.output return spectra def emissivities_all_z(data_path, imf,", "so we set the vector accordingly. row = [0, 0, 1, 0] initialBH.append(np.nan)", "'OIII1661_F', 'OIII1661_EW', 'OIII1666_F', 'OIII1666_EW', 'SiII1263_F', 'SiII1263_EW', 'SiIII1308_F', 'SiIII1308_EW', 'SiII1531_F', 'SiII1531_EW'] return pd.read_csv(path, skiprows=1,", "in path and hr_type == 'TTG': return _hrTTG(path) else: print(\"HOKI ERROR -- Could", "'7.4', '7.5', '7.6', '7.7', '7.8', '7.9', '8.0', '8.1', '8.2', '8.3', '8.4', '8.5', '8.6',", "= pkg_resources.resource_filename('hoki', 'data') ######################## # GENERAL LOAD HELPERS # ######################## def unpickle(path): \"\"\"Extract", "'NII6584_EW', 'SiII6716_F', 'SiII6716_EW', 'SiII6731_F', 'SiII6731_EW', 'OI6300_F', 'OI6300_EW', 'OIII4959_F','OIII4959_EW','OIII5007_F','OIII5007_EW', 'Halpha_F', 'Halpha_EW', 'Hbeta_F', 'Hbeta_EW', 'HeI4686_F',", "vector to be reading in filenames because no more inputs are coming row", "module -- In future versions of hoki\" \\ \"calling set_models_path from hoki.load will", "containing the target data. hr_type : str, optional Type of HR diagram to", "and log_age=6.2) \"\"\" # Check population type star = \"bin\" if binary else", "isinstance(path, str), \"The location of the file is expected to be a string.\"", "new path to take effect. \"\"\" deprecation_msg = \"set_models_path has been moved to", "1, 0, 0] # ... 
and we skip the rest to read in", "next line. continue # This line contains the imf probability and the type", "\"HOKI ERROR: The location of the file is expected to be a string.\"", "what we should do with this line. for l in lines[1:]: # This", "sep=r\"\\s+\", names=['log_age', 'H_wind', 'He_wind', 'Z_wind', 'E_wind', 'E_sn', 'H_sn', 'He_sn', 'Z_sn'], engine='python') def _stellar_masses(path):", "\"set_models_path has been moved to the hoki.constants module -- In future versions of", "time otherwise else: print(\"Compiled file not found. Data will be compiled.\") res =", "'set_models_path', 'unpickle'] data_path = pkg_resources.resource_filename('hoki', 'data') ######################## # GENERAL LOAD HELPERS # ########################", "SED file \"\"\" return pd.read_csv(path, sep=r\"\\s+\", engine='python', names=['WL', '6.0', '6.1', '6.2', '6.3', '6.4',", "data. hr_type : str, optional Type of HR diagram to load: 'TL', 'Tg'", "'10.9', '11.0']) def _ionizing_flux(path): \"\"\" Load One ionizing flux file \"\"\" return pd.read_csv(path,", "stellar masses file into a dataframe \"\"\" return pd.read_csv(path, sep=r\"\\s+\", names=['log_age', 'stellar_mass', 'remnant_mass'],", "initialP}).astype({'filenames': str, 'model_imf': float, 'types': int, 'mixed_imf': float, 'mixed_age': float, 'initial_BH': float, 'initial_P':", "can check the path was correctly updated by looking at this file:' '\\n'+path_to_settings)", "target data. hr_type : str, optional Type of HR diagram to load: 'TL',", "know what each value means, consult the BPASS manual if int(elements[1]) < 2:", "'10.0', '10.1', '10.2', '10.3', '10.4', '10.5', '10.6', '10.7', '10.8', '10.9', '11.0']) def _ionizing_flux(path):", "folder location is expected to be a string.\") # Check if compiled spectra", "dummy to df from a filename\"\"\" inv_dict ={v: k for k, v in", "0, 1, 1] continue elif row[2]: # Splitting the two values and putting", "of inputs and puts them in their rightful lists. elements = l.split() initialBH.append(elements[0])", "rates.loc[:, (BPASS_NUM_METALLICITIES[num], slice(None))] = data.to_numpy() # swap metallicity and event type return rates.swaplevel(0,", "initialP.append(elements[0]) # We then reset the vector to be reading in filenames because", "output DataFrame arrays = [BPASS_NUM_METALLICITIES, BPASS_EVENT_TYPES] columns = pd.MultiIndex.from_product( arrays, names=[\"Metallicicty\", \"Event Type\"])", "row = [0, 1, 0, 0] # ... and we skip the rest", "We then reset the vector to be reading in filenames because no more", "# check if compiled file exists if os.path.isfile(f\"{data_path}/all_spectra-{star}-{imf}.npy\"): print(\"Loading precompiled file.\") spectra =", "= open(path).read().split(\"\\n\") # rows [a,b,c,d] in the BPASS manual row = [1, 0,", "from files. Notes ----- The first time this function is run on a", "file line by line. Using .split is not possible/convenient # The vector will", "IMF.\\n\"\\ \"These can be found in the documentation of this function.\") # Create", "elif \"yields\" in path: return _yields(path) elif \"starmass\" in path: return _stellar_masses(path) elif", "the BPASS supernova event files. Notes ----- The rates are just read from", "---------- path : str, Absolute path to the top level of the stellar", "path and hr_type == 'TL': return _hrTL(path) elif \"hrs\" in path and hr_type", "`str` The path to the folder containing the BPASS files. binary : `bool`", "the metallicity (level=1, `float`) |Event Type | Ia | IIP | ... 
|", "'9.9', '10.0', '10.1', '10.2', '10.3', '10.4', '10.5', '10.6', '10.7', '10.8', '10.9', '11.0']) def", "4: # If type is 2 or 3, we know the initial BH", "file exists if os.path.isfile(f\"{data_path}/all_spectra-{star}-{imf}.npy\"): print(\"Loading precompiled file.\") spectra = np.load(f\"{data_path}/all_spectra-{star}-{imf}.npy\") print(\"Done Loading.\") #", "BPASS emissivities (Nion [1/s], L_Halpha [ergs/s], L_FUV [ergs/s/A], L_NUV [ergs/s/A]) for a specific", "assert os.path.isdir(path), 'HOKI ERROR: The path provided does not correspond to a valid", "------- emissivities : `numpy.ndarray` (N_Z, N_age, 4) [(metallicity, log_age, band)] A 3D numpy", "One stellar type number file into a dataframe \"\"\" return pd.read_csv(path, sep=r\"\\s+\", names=['log_age',", "'Halpha_EW', 'Hbeta_F', 'Hbeta_EW', 'HeI4686_F', 'HeI4686_EW'] return pd.read_csv(path, skiprows=1, sep=r'\\s+', engine='python', names=column_opt_em_lines) def _UV_nebular_lines(path):", "in their rightful lists. elements = l.split() initialBH.append(elements[0]) initialP.append(elements[0]) # We then reset", "str), \"HOKI ERROR: The location of the file is expected to be a", "= [0, 0, 1, 1] continue elif row[2]: # Splitting the two values", "file containing all the BPASS spectra per metallicity for faster loading in the", "of the file is expected to be a string.\" assert os.path.isfile(path), \"HOKI: ERROR", "by line. Using .split is not possible/convenient # The vector will tell use", "= \"set_models_path has been moved to the hoki.constants module -- In future versions", "log_age=6.2 at 999 Angstrom) \"\"\" # Check population type star = \"bin\" if", "go back to reading a filename or to probe those inputs. if not", "there is no corresponding value for a particular model, will append a NaN", "def model_input(path): \"\"\" Loads inputs from one file and put them in a", "folder containing the BPASS files. binary : `bool` Use the binary files or", "initialP.append(np.nan) continue elif int(elements[1]) == 4: # If the type is 4 we", "Compile the spectra for faster reading next time otherwise else: print(\"Compiled file not", "to either go back to reading a filename or to probe those inputs.", "need the next 2 lines. # We set the vector accordingly. row =", "= [] # This goes through the file line by line. Using .split", "'6.5', '6.6', '6.7', '6.8', '6.9', '7.0', '7.1', '7.2', '7.3', '7.4', '7.5', '7.6', '7.7',", "= [] modeltypes = [] mixedimf = [] mixedage = [] initialBH =", "stores the file in the same folder with the name: `all_spectra-[bin/sin]-[imf].pkl` The spectra", "imf probability and the type - we reset the vector.. row = [0,", "imf, binary=binary ) emissivities = res.output return emissivities ################# # # ################# def", "'Hbeta_EW', 'HeI4686_F', 'HeI4686_EW'] return pd.read_csv(path, skiprows=1, sep=r'\\s+', engine='python', names=column_opt_em_lines) def _UV_nebular_lines(path): column_UV_em_lines =", "a new file name row = [1, 0, 0, 0] mixedimf.append(0.0) mixedage.append(0.0) initialBH.append(np.nan)", "directory named something like bpass-v2.2-newmodels and the next level down should contain 'NEWBINMODS'", "location of the file is expected to be a string.\" assert os.path.isfile(path), f\"File", "of HR diagram to load: 'TL', 'Tg' or 'TTG'. 
Returns ------- Output Data", "------- `pandas.DataFrame` (51, (8,13)) (log_age, (event_types, metallicity) A pandas MultiIndex dataframe containing the", "location is expected to be a string.\") # Check if compiled spectra are", "(Nion [1/s], L_Halpha [ergs/s], L_FUV [ergs/s/A], L_NUV [ergs/s/A]) for a specific imf and", "Load One SED file \"\"\" return pd.read_csv(path, sep=r\"\\s+\", engine='python', names=['WL', '6.0', '6.1', '6.2',", "\"\"\" return pd.read_csv(path, sep=r\"\\s+\", names=['log_age', 'H_wind', 'He_wind', 'Z_wind', 'E_wind', 'E_sn', 'H_sn', 'He_sn', 'Z_sn'],", "inv_dict.keys() else 'Nan'+str(key) for key in range(96)] dummy = pd.read_csv(filename, names=cols, sep=r\"\\s+\", engine='python')", "LINES # ########################## def nebular_lines(path): \"\"\" Load the nebular line output information Parameters", "values modelimfs.append(elements[0]) # and append them modeltypes.append(elements[1]) # The next step is decided", "case all the other potential inputs are NaNs and we go back to", "top level of the stellar models this could be a directory named something", "pair of inputs and puts them in their rightful lists. elements = l.split()", "`\"imf100_300\"` - `\"imf135_100\"` - `\"imf135_300\"` - `\"imfall_300\"` - `\"imf170_100\"` - `\"imf170_300\"` Returns -------", "for Z=0.0001 and log_age=6.2) \"\"\" # Check population type star = \"bin\" if", "is the # data frame good enough? __all__ = ['model_input', 'model_output', 'set_models_path', 'unpickle']", "N_age, 4) [(metallicity, log_age, band)] A 3D numpy array containing all the BPASS", "'NEWSINMODS'. Notes ----- You are going to have to reload hoki for your", "line so we set the vector accordingly. row = [0, 0, 1, 0]", "its path is incorrect.\" lines = open(path).read().split(\"\\n\") # rows [a,b,c,d] in the BPASS", "if imf not in BPASS_IMFS: raise HokiKeyError( f\"{imf} is not a BPASS IMF.", "Create the output DataFrame arrays = [BPASS_NUM_METALLICITIES, BPASS_EVENT_TYPES] columns = pd.MultiIndex.from_product( arrays, names=[\"Metallicicty\",", "all the BPASS spectra per metallicity for faster loading in the future. It", "\" if \"supernova\" in path: return _sn_rates(path) elif \"numbers\" in path: return _stellar_numbers(path)", "(45900,100) a = np.loadtxt(path) return hr.HRDiagram(a[0:5100,:].reshape(51,100,100), a[5100:10200,:].reshape(51,100,100), a[10200:15300,:].reshape(51,100,100), hr_type='TL') def _hrTg(path): \"\"\" Load", "the last possible pair of inputs and puts them in their rightful lists.", "1, axis=1) def spectra_all_z(data_path, imf, binary=True): \"\"\" Load all BPASS spectra from files.", "initial P will be NaN # but we also need to read the", "rates.loc[6.5, (\"Ia\", 0.02)] Notes ----- This dataframe has the following structure. The index", "could be a directory named something like bpass-v2.2-newmodels and the next level down", "= np.load(f\"{data_path}/all_ionizing-{star}-{imf}.npy\") print(\"Done Loading.\") # Compile the spectra for faster reading next time", "One ionizing flux file \"\"\" return pd.read_csv(path, sep=r'\\s+', engine='python', names=['log_age', 'prod_rate', 'halpha', 'FUV',", "people to chose to load the data into a numpy arrays as well", "found. Data will be compiled\") spec = hoki.data_compilers.SpectraCompiler( data_path, data_path, imf, binary=binary )", "pandas as pd import hoki.hrdiagrams as hr from hoki.constants import * import os", "we need the next 2 lines. # We set the vector accordingly. row", "to reload hoki for your new path to take effect. 
\"\"\" deprecation_msg =", "on whether we need the next line for more inputs # we set", "'U', 'B', 'V', 'R', 'I', 'J', 'H', 'K', 'u', 'g', 'r', 'i', 'z',", "'initial_BH': float, 'initial_P': float}) return input_df ########################## # MODEL OUTPUT FUNCTIONS # ##########################", "TODO: Deprecation warning def set_models_path(path): \"\"\" Changes the path to the stellar models", "reads the last possible pair of inputs and puts them in their rightful", "def _optical_nebular_lines(path): column_opt_em_lines=['model_num', 'logU', 'log_nH', 'log_age', 'NII6548_F', 'NII6548_EW', 'NII6584_F', 'NII6584_EW', 'SiII6716_F', 'SiII6716_EW', 'SiII6731_F',", "= [1, 0, 0, 0] # Once we've goe through the whole file", "'OIII1666_F', 'OIII1666_EW', 'SiII1263_F', 'SiII1263_EW', 'SiIII1308_F', 'SiIII1308_EW', 'SiII1531_F', 'SiII1531_EW'] return pd.read_csv(path, skiprows=1, sep=r'\\s+', engine='python',", "can be found in the documentation of this function.\") # Create the output", "object \"\"\" assert isinstance(path, str), \"HOKI ERROR: The location of the file is", "open(path_to_settings, 'r') as stream: settings = yaml.safe_load(stream) settings['models_path'] = path with io.open(path_to_settings, 'w',", "are already present in data folder if os.path.isfile(f\"{data_path}/all_ionizing-{star}-{imf}.npy\"): print(\"Load precompiled file.\") emissivities =", "\"HOKI ERROR: This file does not exist, or its path is incorrect.\" assert", "return pd.read_csv(path, skiprows=1, sep=r'\\s+', engine='python', names=column_UV_em_lines) ##################################### # BPASS Load over all metallicity", "modeltypes.append(elements[1]) # The next step is decided according to the value of type", "on a folder it will generate a pickle file containing all the BPASS", "masses file into a dataframe \"\"\" return pd.read_csv(path, sep=r\"\\s+\", names=['log_age', 'stellar_mass', 'remnant_mass'], engine='python')", "type is 2 or 3, we know the initial BH and initial P", "binary=True): \"\"\" Load all BPASS emissivities from files. Notes ----- The first time", "This file does not exist, or its path is incorrect.\" if 'UV' in", "IMF. Please select a correct IMF.\") # check if data_path is a string", "names=['log_age', 'H_wind', 'He_wind', 'Z_wind', 'E_wind', 'E_sn', 'H_sn', 'He_sn', 'Z_sn'], engine='python') def _stellar_masses(path): \"\"\"", "all BPASS spectra from files. Notes ----- The first time this function is", "to reading a filename or to probe those inputs. if not row[3]: row", "elif row[2]: # Splitting the two values and putting them in their respective", "the binary files or just the single stars. Default=True imf : `str` BPASS", "{path} does not exist, or its path is incorrect.\" lines = open(path).read().split(\"\\n\") #", "rows [a,b,c,d] in the BPASS manual row = [1, 0, 0, 0] #", "Once we've goe through the whole file and filled our lists, we can", "f\"File {path} does not exist, or its path is incorrect.\" lines = open(path).read().split(\"\\n\")", "import warnings from hoki.utils.exceptions import HokiDeprecationWarning, HokiKeyError # TODO: Should I allow people", "2: # In this case all the other potential inputs are NaNs and", "\"\\n2) Trying to load an HR diagram? \" \"Make sure hr_type is set!", "the IMF to be used. The accepted IMF identifiers are: - `\"imf_chab100\"` -", "HR diagram? \" \"Make sure hr_type is set! 
Available options are: 'TL', 'Tg',", "the BPASS manual row = [1, 0, 0, 0] # All potential input", "\"\"\" Loads inputs from one file and put them in a dataframe Parameters", "in BPASS_IMFS: raise HokiKeyError( f\"{imf} is not a BPASS IMF. Please select a", "Returns ------- Output Data : pandas.DataFrame or hoki.hrdiagrams.HRDiagrams object \"\"\" assert isinstance(path, str),", "path is incorrect.\" lines = open(path).read().split(\"\\n\") # rows [a,b,c,d] in the BPASS manual", "# If type is 2 or 3, we know the initial BH and", "a dataframe Parameters ---------- path : str Path to the file containing the", "file is expected to be a string.\" assert os.path.isfile(path), \"HOKI ERROR: This file", "ERROR This file does not exist, or its path is incorrect.\" if 'UV'", "vector will tell use what we should do with this line. for l", "log_age, wavelength)] A 3D numpy array containing all the BPASS spectra for a", "exist, or its path is incorrect.\" lines = open(path).read().split(\"\\n\") # rows [a,b,c,d] in", "an array of shape (45900,100) a = np.loadtxt(path) return hr.HRDiagram(a[0:5100,:].reshape(51,100,100), a[5100:10200,:].reshape(51,100,100), a[10200:15300,:].reshape(51,100,100), hr_type='TL')", "all metallicity # ##################################### def rates_all_z(data_path, imf, binary=True): \"\"\" Loads the BPASS supernova", "hr_type == 'TTG': return _hrTTG(path) else: print(\"HOKI ERROR -- Could not load the", "path : str Path to the file containing the target data. hr_type :", "'7.3', '7.4', '7.5', '7.6', '7.7', '7.8', '7.9', '8.0', '8.1', '8.2', '8.3', '8.4', '8.5',", "be a string.\" assert os.path.isfile(path), \"HOKI ERROR: This file does not exist, or", "# Check if the given IMF is in the accepted IMFs if imf", "data_path is a string if not isinstance(data_path, str): raise HokiTypeError(\"The folder location is", "future versions of hoki\" \\ \"calling set_models_path from hoki.load will fail\" warnings.warn(deprecation_msg, HokiDeprecationWarning)", "'8.6', '8.7', '8.8', '8.9', '9.0', '9.1', '9.2', '9.3', '9.4', '9.5', '9.6', '9.7', '9.8',", "DUMMY VARIABLE # ######################## def dummy_to_dataframe(filename, bpass_version=DEFAULT_BPASS_VERSION): \"\"\"Reads in dummy to df from", "Population output. \" \"\\nDEBUGGING ASSISTANT:\\n1) Is the filename correct?\" \"\\n2) Trying to load", "filenames.append(l) # The next line will contain the imf probability and the type", "path: return _UV_nebular_lines(path) elif 'Optical' in path: return _optical_nebular_lines(path) def _optical_nebular_lines(path): column_opt_em_lines=['model_num', 'logU',", "type # To know what each value means, consult the BPASS manual if", "if binary else \"sin\" # check IMF key if imf not in BPASS_IMFS:", "is not possible/convenient # The vector will tell use what we should do", "input parameters and filename # If there is no corresponding value for a", "Stellar Population output. \" \"\\nDEBUGGING ASSISTANT:\\n1) Is the filename correct?\" \"\\n2) Trying to", "metallicity in enumerate(BPASS_METALLICITIES): data = model_output( f\"{data_path}/supernova-{star}-{imf}.{metallicity}.dat\" ) data = data.loc[:, slice(BPASS_EVENT_TYPES[0], BPASS_EVENT_TYPES[-1])]", "binary=True): \"\"\" Loads the BPASS supernova event files. 
Notes ----- The rates are", "compiled\") spec = hoki.data_compilers.SpectraCompiler( data_path, data_path, imf, binary=binary ) spectra = spec.output return", "file is expected to be a string.\" assert os.path.isfile(path), f\"File {path} does not", "'10.8', '10.9', '11.0']) def _ionizing_flux(path): \"\"\" Load One ionizing flux file \"\"\" return", "- `\"imf135_100\"` - `\"imf135_300\"` - `\"imfall_300\"` - `\"imf170_100\"` - `\"imf170_300\"` Returns ------- `pandas.DataFrame`", "float, 'initial_BH': float, 'initial_P': float}) return input_df ########################## # MODEL OUTPUT FUNCTIONS #", "(TL type) \"\"\" # 'a' is just a place order which contains the", "with open(path_to_settings, 'r') as stream: settings = yaml.safe_load(stream) settings['models_path'] = path with io.open(path_to_settings,", "0, 0] # ... and we skip the rest to read in the", "assert isinstance(path, str), \"The location of the file is expected to be a", "'FUV', 'NUV']) def _colours(path): \"\"\" Load One colour file \"\"\" return pd.read_csv(path, sep=r'\\s+',", "line for more inputs # we set the vector to either go back", "path and hr_type == 'Tg': return _hrTg(path) elif \"hrs\" in path and hr_type", "int(elements[1]) != 4: # If type is 2 or 3, we know the", "Could not load the Stellar Population output. \" \"\\nDEBUGGING ASSISTANT:\\n1) Is the filename", "[1, 0, 0, 0] # Once we've goe through the whole file and", "'WNH_hL', 'WN_hL', 'WC_hL', 'O_lL', 'Of_lL', 'B_lL', 'A_lL', 'YSG_lL', 'K_lL', 'M_lL', 'WNH_lL', 'WN_lL', 'WC_lL'],", "level down should contain 'NEWBINMODS' and 'NEWSINMODS'. Notes ----- You are going to", "One Supernova rate file into a dataframe \"\"\" return pd.read_csv(path, sep=r\"\\s+\", names=['log_age', 'Ia',", "'mixed_age': float, 'initial_BH': float, 'initial_P': float}) return input_df ########################## # MODEL OUTPUT FUNCTIONS", "documentation of this function.\") # Create the output DataFrame arrays = [BPASS_NUM_METALLICITIES, BPASS_EVENT_TYPES]", "One stellar masses file into a dataframe \"\"\" return pd.read_csv(path, sep=r\"\\s+\", names=['log_age', 'stellar_mass',", "the path was correctly updated by looking at this file:' '\\n'+path_to_settings) ######################## #", "import pkg_resources import hoki.data_compilers import warnings from hoki.utils.exceptions import HokiDeprecationWarning, HokiKeyError # TODO:", "the stellar models this could be a directory named something like bpass-v2.2-newmodels and", "like everything went well! You can check the path was correctly updated by", "more inputs # we set the vector to either go back to reading", "return _sn_rates(path) elif \"numbers\" in path: return _stellar_numbers(path) elif \"yields\" in path: return", "next level down should contain 'NEWBINMODS' and 'NEWSINMODS'. Notes ----- You are going", "LOAD HELPERS # ######################## def unpickle(path): \"\"\"Extract pickle files\"\"\" assert os.path.isfile(path), 'File not", "HR diagrams (T/TG type) \"\"\" a = np.loadtxt(path) return hr.HRDiagram(a[30600:35700,:].reshape(51,100,100), a[35700:40800,:].reshape(51,100,100), a[40800:,:].reshape(51,100,100), hr_type='TTG')", "first time this function is ran on a folder it will generate a", "path to take effect. 
\"\"\" deprecation_msg = \"set_models_path has been moved to the", "(type, metallicity)] Example: rates.loc[6.5, (\"Ia\", 0.02)] Notes ----- This dataframe has the following", "'O_lL', 'Of_lL', 'B_lL', 'A_lL', 'YSG_lL', 'K_lL', 'M_lL', 'WNH_lL', 'WN_lL', 'WC_lL'], engine='python') def _yields(path):", "_stellar_numbers(path): \"\"\" Load One stellar type number file into a dataframe \"\"\" return", "are going to have to reload hoki for your new path to take", "file into a dataframe \"\"\" return pd.read_csv(path, sep=r\"\\s+\", names=['log_age', 'H_wind', 'He_wind', 'Z_wind', 'E_wind',", "value of type # To know what each value means, consult the BPASS", "for a particular model, will append a NaN filenames = [] modelimfs =", "Load the nebular line output information Parameters ---------- path Returns ------- \"\"\" assert", "is expected to be a string.\" assert os.path.isfile(path), \"HOKI: ERROR This file does", "\"\"\" assert isinstance(path, str), \"The location of the file is expected to be", "npy file containing all the BPASS emissivities for faster loading in the future.", ": `numpy.ndarray` (13, 51, 100000) [(metallicity, log_age, wavelength)] A 3D numpy array containing", "np.loadtxt(path) return hr.HRDiagram(a[0:5100,:].reshape(51,100,100), a[5100:10200,:].reshape(51,100,100), a[10200:15300,:].reshape(51,100,100), hr_type='TL') def _hrTg(path): \"\"\" Load One HR diagrams", "HR diagrams (Tg type) \"\"\" a = np.loadtxt(path) return hr.HRDiagram(a[15300:20400,:].reshape(51,100,100), a[20400:25500,:].reshape(51,100,100), a[25500:30600,:].reshape(51,100,100), hr_type='Tg')", "metallicity (level=1, `float`) |Event Type | Ia | IIP | ... | PISNe", "the file is expected to be a string.\" assert os.path.isfile(path), f\"File {path} does", "'OIII1661_EW', 'OIII1666_F', 'OIII1666_EW', 'SiII1263_F', 'SiII1263_EW', 'SiIII1308_F', 'SiIII1308_EW', 'SiII1531_F', 'SiII1531_EW'] return pd.read_csv(path, skiprows=1, sep=r'\\s+',", "'9.8', '9.9', '10.0', '10.1', '10.2', '10.3', '10.4', '10.5', '10.6', '10.7', '10.8', '10.9', '11.0'])" ]
[ "fa = FrankaArm() if args.open_gripper: fa.open_gripper() print('Be very careful!! Make sure the robot", "JOINTS Position.') wait_for_enter() fa.reset_joints() print('Using default joint impedances to move back and forth.')", "forth.') wait_for_enter() fa.goto_joints(FC.READY_JOINTS, joint_impedances=FC.DEFAULT_JOINT_IMPEDANCES) fa.goto_joints(FC.HOME_JOINTS) print('Now using different joint impedances to move back", "to HOME JOINTS Position.') wait_for_enter() fa.reset_joints() print('Using default joint impedances to move back", "wait_for_enter(): if sys.version_info[0] < 3: raw_input('Press Enter to continue:') else: input('Press Enter to", "joint impedances to move back and forth.') wait_for_enter() fa.goto_joints(FC.READY_JOINTS, joint_impedances=FC.DEFAULT_JOINT_IMPEDANCES) fa.goto_joints(FC.HOME_JOINTS) print('Now using", "'-o', action='store_true') args = parser.parse_args() print('Starting robot') fa = FrankaArm() if args.open_gripper: fa.open_gripper()", "from frankapy import FrankaConstants as FC def wait_for_enter(): if sys.version_info[0] < 3: raw_input('Press", "'-t', type=float, default=10) parser.add_argument('--open_gripper', '-o', action='store_true') args = parser.parse_args() print('Starting robot') fa =", "back and forth.') wait_for_enter() fa.goto_joints(FC.READY_JOINTS, joint_impedances=[1500, 1500, 1500, 1250, 1250, 1000, 1000]) fa.goto_joints(FC.HOME_JOINTS)", "1500, 1500, 1250, 1250, 1000, 1000]) fa.goto_joints(FC.HOME_JOINTS) print('Remember to reset the joint_impedances to", "import FrankaArm from frankapy import FrankaConstants as FC def wait_for_enter(): if sys.version_info[0] <", "else: input('Press Enter to continue:') if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--time',", "1250, 1000, 1000]) fa.goto_joints(FC.HOME_JOINTS) print('Remember to reset the joint_impedances to defaults.') fa.goto_joints(FC.HOME_JOINTS, joint_impedances=FC.DEFAULT_JOINT_IMPEDANCES)", "default=10) parser.add_argument('--open_gripper', '-o', action='store_true') args = parser.parse_args() print('Starting robot') fa = FrankaArm() if", "fa.reset_joints() print('Using default joint impedances to move back and forth.') wait_for_enter() fa.goto_joints(FC.READY_JOINTS, joint_impedances=FC.DEFAULT_JOINT_IMPEDANCES)", "and forth.') wait_for_enter() fa.goto_joints(FC.READY_JOINTS, joint_impedances=[1500, 1500, 1500, 1250, 1250, 1000, 1000]) fa.goto_joints(FC.HOME_JOINTS) print('Remember", "if sys.version_info[0] < 3: raw_input('Press Enter to continue:') else: input('Press Enter to continue:')", "safely move to HOME JOINTS Position.') wait_for_enter() fa.reset_joints() print('Using default joint impedances to", "impedances to move back and forth.') wait_for_enter() fa.goto_joints(FC.READY_JOINTS, joint_impedances=FC.DEFAULT_JOINT_IMPEDANCES) fa.goto_joints(FC.HOME_JOINTS) print('Now using different", "Make sure the robot can safely move to HOME JOINTS Position.') wait_for_enter() fa.reset_joints()", "and forth.') wait_for_enter() fa.goto_joints(FC.READY_JOINTS, joint_impedances=FC.DEFAULT_JOINT_IMPEDANCES) fa.goto_joints(FC.HOME_JOINTS) print('Now using different joint impedances to move", "def wait_for_enter(): if sys.version_info[0] < 3: raw_input('Press Enter to continue:') else: input('Press Enter", "continue:') if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--time', '-t', type=float, default=10) parser.add_argument('--open_gripper',", "careful!! 
Make sure the robot can safely move to HOME JOINTS Position.') wait_for_enter()", "FrankaArm from frankapy import FrankaConstants as FC def wait_for_enter(): if sys.version_info[0] < 3:", "args = parser.parse_args() print('Starting robot') fa = FrankaArm() if args.open_gripper: fa.open_gripper() print('Be very", "= parser.parse_args() print('Starting robot') fa = FrankaArm() if args.open_gripper: fa.open_gripper() print('Be very careful!!", "1500, 1250, 1250, 1000, 1000]) fa.goto_joints(FC.HOME_JOINTS) print('Remember to reset the joint_impedances to defaults.')", "parser = argparse.ArgumentParser() parser.add_argument('--time', '-t', type=float, default=10) parser.add_argument('--open_gripper', '-o', action='store_true') args = parser.parse_args()", "3: raw_input('Press Enter to continue:') else: input('Press Enter to continue:') if __name__ ==", "parser.add_argument('--open_gripper', '-o', action='store_true') args = parser.parse_args() print('Starting robot') fa = FrankaArm() if args.open_gripper:", "= argparse.ArgumentParser() parser.add_argument('--time', '-t', type=float, default=10) parser.add_argument('--open_gripper', '-o', action='store_true') args = parser.parse_args() print('Starting", "print('Starting robot') fa = FrankaArm() if args.open_gripper: fa.open_gripper() print('Be very careful!! Make sure", "Enter to continue:') if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--time', '-t', type=float,", "'__main__': parser = argparse.ArgumentParser() parser.add_argument('--time', '-t', type=float, default=10) parser.add_argument('--open_gripper', '-o', action='store_true') args =", "different joint impedances to move back and forth.') wait_for_enter() fa.goto_joints(FC.READY_JOINTS, joint_impedances=[1500, 1500, 1500,", "move back and forth.') wait_for_enter() fa.goto_joints(FC.READY_JOINTS, joint_impedances=[1500, 1500, 1500, 1250, 1250, 1000, 1000])", "type=float, default=10) parser.add_argument('--open_gripper', '-o', action='store_true') args = parser.parse_args() print('Starting robot') fa = FrankaArm()", "move to HOME JOINTS Position.') wait_for_enter() fa.reset_joints() print('Using default joint impedances to move", "joint_impedances=FC.DEFAULT_JOINT_IMPEDANCES) fa.goto_joints(FC.HOME_JOINTS) print('Now using different joint impedances to move back and forth.') wait_for_enter()", "parser.parse_args() print('Starting robot') fa = FrankaArm() if args.open_gripper: fa.open_gripper() print('Be very careful!! 
Make", "move back and forth.') wait_for_enter() fa.goto_joints(FC.READY_JOINTS, joint_impedances=FC.DEFAULT_JOINT_IMPEDANCES) fa.goto_joints(FC.HOME_JOINTS) print('Now using different joint impedances", "to move back and forth.') wait_for_enter() fa.goto_joints(FC.READY_JOINTS, joint_impedances=[1500, 1500, 1500, 1250, 1250, 1000,", "using different joint impedances to move back and forth.') wait_for_enter() fa.goto_joints(FC.READY_JOINTS, joint_impedances=[1500, 1500,", "wait_for_enter() fa.reset_joints() print('Using default joint impedances to move back and forth.') wait_for_enter() fa.goto_joints(FC.READY_JOINTS,", "fa.goto_joints(FC.HOME_JOINTS) print('Now using different joint impedances to move back and forth.') wait_for_enter() fa.goto_joints(FC.READY_JOINTS,", "fa.goto_joints(FC.READY_JOINTS, joint_impedances=[1500, 1500, 1500, 1250, 1250, 1000, 1000]) fa.goto_joints(FC.HOME_JOINTS) print('Remember to reset the", "to continue:') if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--time', '-t', type=float, default=10)", "continue:') else: input('Press Enter to continue:') if __name__ == '__main__': parser = argparse.ArgumentParser()", "= FrankaArm() if args.open_gripper: fa.open_gripper() print('Be very careful!! Make sure the robot can", "FrankaConstants as FC def wait_for_enter(): if sys.version_info[0] < 3: raw_input('Press Enter to continue:')", "robot can safely move to HOME JOINTS Position.') wait_for_enter() fa.reset_joints() print('Using default joint", "default joint impedances to move back and forth.') wait_for_enter() fa.goto_joints(FC.READY_JOINTS, joint_impedances=FC.DEFAULT_JOINT_IMPEDANCES) fa.goto_joints(FC.HOME_JOINTS) print('Now", "FrankaArm() if args.open_gripper: fa.open_gripper() print('Be very careful!! Make sure the robot can safely", "parser.add_argument('--time', '-t', type=float, default=10) parser.add_argument('--open_gripper', '-o', action='store_true') args = parser.parse_args() print('Starting robot') fa", "< 3: raw_input('Press Enter to continue:') else: input('Press Enter to continue:') if __name__", "import argparse import sys from frankapy import FrankaArm from frankapy import FrankaConstants as", "action='store_true') args = parser.parse_args() print('Starting robot') fa = FrankaArm() if args.open_gripper: fa.open_gripper() print('Be", "print('Now using different joint impedances to move back and forth.') wait_for_enter() fa.goto_joints(FC.READY_JOINTS, joint_impedances=[1500,", "if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--time', '-t', type=float, default=10) parser.add_argument('--open_gripper', '-o',", "wait_for_enter() fa.goto_joints(FC.READY_JOINTS, joint_impedances=[1500, 1500, 1500, 1250, 1250, 1000, 1000]) fa.goto_joints(FC.HOME_JOINTS) print('Remember to reset", "back and forth.') wait_for_enter() fa.goto_joints(FC.READY_JOINTS, joint_impedances=FC.DEFAULT_JOINT_IMPEDANCES) fa.goto_joints(FC.HOME_JOINTS) print('Now using different joint impedances to", "print('Using default joint impedances to move back and forth.') wait_for_enter() fa.goto_joints(FC.READY_JOINTS, joint_impedances=FC.DEFAULT_JOINT_IMPEDANCES) fa.goto_joints(FC.HOME_JOINTS)", "== '__main__': parser = argparse.ArgumentParser() parser.add_argument('--time', '-t', type=float, default=10) parser.add_argument('--open_gripper', '-o', action='store_true') args", "print('Be very careful!! 
Make sure the robot can safely move to HOME JOINTS", "fa.goto_joints(FC.READY_JOINTS, joint_impedances=FC.DEFAULT_JOINT_IMPEDANCES) fa.goto_joints(FC.HOME_JOINTS) print('Now using different joint impedances to move back and forth.')", "__name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--time', '-t', type=float, default=10) parser.add_argument('--open_gripper', '-o', action='store_true')", "if args.open_gripper: fa.open_gripper() print('Be very careful!! Make sure the robot can safely move", "sure the robot can safely move to HOME JOINTS Position.') wait_for_enter() fa.reset_joints() print('Using", "input('Press Enter to continue:') if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--time', '-t',", "frankapy import FrankaConstants as FC def wait_for_enter(): if sys.version_info[0] < 3: raw_input('Press Enter", "can safely move to HOME JOINTS Position.') wait_for_enter() fa.reset_joints() print('Using default joint impedances", "to continue:') else: input('Press Enter to continue:') if __name__ == '__main__': parser =", "very careful!! Make sure the robot can safely move to HOME JOINTS Position.')", "1250, 1250, 1000, 1000]) fa.goto_joints(FC.HOME_JOINTS) print('Remember to reset the joint_impedances to defaults.') fa.goto_joints(FC.HOME_JOINTS,", "sys.version_info[0] < 3: raw_input('Press Enter to continue:') else: input('Press Enter to continue:') if", "fa.open_gripper() print('Be very careful!! Make sure the robot can safely move to HOME", "joint impedances to move back and forth.') wait_for_enter() fa.goto_joints(FC.READY_JOINTS, joint_impedances=[1500, 1500, 1500, 1250,", "joint_impedances=[1500, 1500, 1500, 1250, 1250, 1000, 1000]) fa.goto_joints(FC.HOME_JOINTS) print('Remember to reset the joint_impedances", "frankapy import FrankaArm from frankapy import FrankaConstants as FC def wait_for_enter(): if sys.version_info[0]", "impedances to move back and forth.') wait_for_enter() fa.goto_joints(FC.READY_JOINTS, joint_impedances=[1500, 1500, 1500, 1250, 1250,", "Position.') wait_for_enter() fa.reset_joints() print('Using default joint impedances to move back and forth.') wait_for_enter()", "argparse.ArgumentParser() parser.add_argument('--time', '-t', type=float, default=10) parser.add_argument('--open_gripper', '-o', action='store_true') args = parser.parse_args() print('Starting robot')", "FC def wait_for_enter(): if sys.version_info[0] < 3: raw_input('Press Enter to continue:') else: input('Press", "to move back and forth.') wait_for_enter() fa.goto_joints(FC.READY_JOINTS, joint_impedances=FC.DEFAULT_JOINT_IMPEDANCES) fa.goto_joints(FC.HOME_JOINTS) print('Now using different joint", "the robot can safely move to HOME JOINTS Position.') wait_for_enter() fa.reset_joints() print('Using default", "import sys from frankapy import FrankaArm from frankapy import FrankaConstants as FC def", "wait_for_enter() fa.goto_joints(FC.READY_JOINTS, joint_impedances=FC.DEFAULT_JOINT_IMPEDANCES) fa.goto_joints(FC.HOME_JOINTS) print('Now using different joint impedances to move back and", "argparse import sys from frankapy import FrankaArm from frankapy import FrankaConstants as FC", "Enter to continue:') else: input('Press Enter to continue:') if __name__ == '__main__': parser", "import FrankaConstants as FC def wait_for_enter(): if sys.version_info[0] < 3: raw_input('Press Enter to", "raw_input('Press Enter to continue:') else: input('Press Enter to continue:') if __name__ == '__main__':", "sys from frankapy import FrankaArm from frankapy 
import FrankaConstants as FC def wait_for_enter():", "robot') fa = FrankaArm() if args.open_gripper: fa.open_gripper() print('Be very careful!! Make sure the", "from frankapy import FrankaArm from frankapy import FrankaConstants as FC def wait_for_enter(): if", "as FC def wait_for_enter(): if sys.version_info[0] < 3: raw_input('Press Enter to continue:') else:", "args.open_gripper: fa.open_gripper() print('Be very careful!! Make sure the robot can safely move to", "HOME JOINTS Position.') wait_for_enter() fa.reset_joints() print('Using default joint impedances to move back and", "forth.') wait_for_enter() fa.goto_joints(FC.READY_JOINTS, joint_impedances=[1500, 1500, 1500, 1250, 1250, 1000, 1000]) fa.goto_joints(FC.HOME_JOINTS) print('Remember to" ]
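The row above appears to hold overlapping shingles of a small frankapy joint-impedance demo script. As a reading aid only, here is a minimal sketch reassembled from the fragments quoted in that row; it is illustrative rather than the original file, and the trailing arguments of the final goto_joints call (truncated in the shingles) are an assumption.

import argparse
import sys

from frankapy import FrankaArm
from frankapy import FrankaConstants as FC


def wait_for_enter():
    # Python 2/3 compatible "press Enter" prompt, as quoted in the shingles.
    if sys.version_info[0] < 3:
        raw_input('Press Enter to continue:')
    else:
        input('Press Enter to continue:')


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--time', '-t', type=float, default=10)
    parser.add_argument('--open_gripper', '-o', action='store_true')
    args = parser.parse_args()

    print('Starting robot')
    fa = FrankaArm()
    if args.open_gripper:
        fa.open_gripper()

    print('Be very careful!! Make sure the robot can safely move to HOME JOINTS Position.')
    wait_for_enter()
    fa.reset_joints()

    # Move READY -> HOME using the default joint impedances.
    print('Using default joint impedances to move back and forth.')
    wait_for_enter()
    fa.goto_joints(FC.READY_JOINTS, joint_impedances=FC.DEFAULT_JOINT_IMPEDANCES)
    fa.goto_joints(FC.HOME_JOINTS)

    # Repeat the motion with custom, hand-specified joint impedances.
    print('Now using different joint impedances to move back and forth.')
    wait_for_enter()
    fa.goto_joints(FC.READY_JOINTS, joint_impedances=[1500, 1500, 1500, 1250, 1250, 1000, 1000])
    fa.goto_joints(FC.HOME_JOINTS)

    print('Remember to reset the joint_impedances to defaults.')
    # The arguments of this last call are cut off in the shingles; resetting to
    # the defaults here is an assumption.
    fa.goto_joints(FC.HOME_JOINTS, joint_impedances=FC.DEFAULT_JOINT_IMPEDANCES)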
[ "len(cmd.split()) == 2: filename = cmd.split(' ', 1)[1] print(ftp.delete(filename)) elif cmd.split(' ', 1)[0]", "elif cmd.split(' ', 1)[0] == command[10] and len(cmd.split()) == 1: print(\"E: Missing argument.\")", "print('FTP error: ', error) if args.ftp_server: try: with FTP(args.ftp_server) as ftp: username =", "1)[0] == command[5] and len(cmd.split()) == 1: print(\"E: Missing argument.\") if cmd.split(' ',", "if cmd == command[1]: ftp.dir() if cmd == command[2]: print(ftp.pwd()) if cmd.split(' ',", "elif cmd.split(' ', 1)[0] == command[7] and len(cmd.split()) == 1: print(\"E: Missing argument.\")", "argument.\") if cmd.split(' ', 1)[0] == command[7] and len(cmd.split()) == 2: dirname =", "len(cmd.split()) == 1: print(\"E: Missing argument.\") if cmd.split(' ', 1)[0] == command[7] and", "and len(cmd.split()) == 1: print(\"E: Missing argument.\") if cmd.split(' ', 1)[0] == command[9]", "'rb'), callback=None)) elif cmd.split(' ', 1)[0] == command[5] and len(cmd.split()) == 1: print(\"E:", "and len(cmd.split()) == 1: print(\"E: Missing argument.\") if cmd.split(' ', 1)[0] == command[7]", "command[3] and len(cmd.split()) == 2: dirname = cmd.split(' ', 1)[1] print(ftp.cwd(dirname)) elif cmd.split('", "and len(cmd.split()) == 2: dirname = cmd.split(' ', 1)[1] print(ftp.cwd(dirname)) elif cmd.split(' ',", "command[6] and len(cmd.split()) == 2: dirname = cmd.split(' ', 1)[1] print(ftp.mkd(dirname)) elif cmd.split('", "% args.ftp_server ) password = <PASSWORD>(prompt='Password: ', stream=None) ftp.login(user=username, passwd=password) print(ftp.getwelcome()) cli('ftp> ')", "and len(cmd.split()) == 2: level = cmd.split(' ', 1)[1] print(ftp.set_debuglevel(int(level))) elif cmd.split(' ',", "', 1)[0] not in ('q', 'quit', 'exit', 'bye'): print(reminder) except ftplib.all_errors as error:", "1)[1] print(ftp.mkd(dirname)) elif cmd.split(' ', 1)[0] == command[6] and len(cmd.split()) == 1: print(\"E:", "cmd = input(prompt) try: if cmd == command[0]: all_commands = command[:] all_commands.sort() print(*all_commands[0:6],", "args.ftp_server: try: with FTP(args.ftp_server) as ftp: username = input('Username (%s): ' % args.ftp_server", "'bye'): print(reminder) except ftplib.all_errors as error: print('FTP error: ', error) if args.ftp_server: try:", "', 1)[0] == command[3] and len(cmd.split()) == 1: print(\"E: Missing argument.\") if cmd.split('", "command[6] and len(cmd.split()) == 1: print(\"E: Missing argument.\") if cmd.split(' ', 1)[0] ==", "if cmd == command[2]: print(ftp.pwd()) if cmd.split(' ', 1)[0] == command[3] and len(cmd.split())", "and len(cmd.split()) == 2: filename = cmd.split(' ', 1)[1] print(ftp.size(filename)) elif cmd.split(' ',", "len(cmd.split()) == 1: print(\"E: Missing argument.\") if cmd.split(' ', 1)[0] == command[4] and", "type a valid command'): command = [ 'help', 'ls', 'pwd', 'cd', 'get', 'send',", "import argparse import getpass parser = argparse.ArgumentParser(description='FTP Client.') parser.add_argument('ftp_server') args = parser.parse_args() def", "elif cmd.split(' ', 1)[0] == command[3] and len(cmd.split()) == 1: print(\"E: Missing argument.\")", "== 1: print(\"E: Missing argument.\") if cmd == command[11]: os.system(\"clear\") if cmd in", "cmd.split(' ', 1)[1] print(ftp.rmd(dirname)) elif cmd.split(' ', 1)[0] == command[7] and len(cmd.split()) ==", "# -*- coding: utf-8 -*- import os import sys from ftplib import FTP", "all_commands = command[:] all_commands.sort() print(*all_commands[0:6], sep='\\t') print(*all_commands[6:12], sep='\\t') if cmd == command[1]: 
ftp.dir()", "if cmd.split(' ', 1)[0] == command[3] and len(cmd.split()) == 2: dirname = cmd.split('", "cmd.split(' ', 1)[1] print(ftp.retrbinary(\"RETR \" + filename, open(str(filename), 'wb').write)) elif cmd.split(' ', 1)[0]", "open(str(filename), 'wb').write)) elif cmd.split(' ', 1)[0] == command[4] and len(cmd.split()) == 1: print(\"E:", "cmd.split(' ', 1)[0] == command[10] and len(cmd.split()) == 2: level = cmd.split(' ',", "== command[10] and len(cmd.split()) == 2: level = cmd.split(' ', 1)[1] print(ftp.set_debuglevel(int(level))) elif", "1)[1] print(ftp.set_debuglevel(int(level))) elif cmd.split(' ', 1)[0] == command[10] and len(cmd.split()) == 1: print(\"E:", "'pwd', 'cd', 'get', 'send', 'mkdir', 'rmdir', 'delete', 'size', 'debug', 'clear' ] shell =", "1: print(\"E: Missing argument.\") if cmd == command[11]: os.system(\"clear\") if cmd in ('q',", "print(*all_commands[0:6], sep='\\t') print(*all_commands[6:12], sep='\\t') if cmd == command[1]: ftp.dir() if cmd == command[2]:", "== command[9] and len(cmd.split()) == 1: print(\"E: Missing argument.\") if cmd.split(' ', 1)[0]", "if cmd == command[0]: all_commands = command[:] all_commands.sort() print(*all_commands[0:6], sep='\\t') print(*all_commands[6:12], sep='\\t') if", "a valid command'): command = [ 'help', 'ls', 'pwd', 'cd', 'get', 'send', 'mkdir',", "'wb').write)) elif cmd.split(' ', 1)[0] == command[4] and len(cmd.split()) == 1: print(\"E: Missing", "error: ', error) if args.ftp_server: try: with FTP(args.ftp_server) as ftp: username = input('Username", "cmd.split(' ', 1)[0] == command[6] and len(cmd.split()) == 1: print(\"E: Missing argument.\") if", "= cmd.split(' ', 1)[1] print(ftp.set_debuglevel(int(level))) elif cmd.split(' ', 1)[0] == command[10] and len(cmd.split())", "1: print(\"E: Missing argument.\") if cmd.split(' ', 1)[0] == command[7] and len(cmd.split()) ==", "True while shell: cmd = input(prompt) try: if cmd == command[0]: all_commands =", "'exit', 'bye'): print(ftp.quit()) shell = False if len(cmd.split()) and cmd.split(' ', 1)[0] not", "2: dirname = cmd.split(' ', 1)[1] print(ftp.rmd(dirname)) elif cmd.split(' ', 1)[0] == command[7]", "len(cmd.split()) == 2: dirname = cmd.split(' ', 1)[1] print(ftp.cwd(dirname)) elif cmd.split(' ', 1)[0]", "1)[0] not in ('q', 'quit', 'exit', 'bye'): print(reminder) except ftplib.all_errors as error: print('FTP", "print(\"E: Missing argument.\") if cmd.split(' ', 1)[0] == command[8] and len(cmd.split()) == 2:", "cmd.split(' ', 1)[0] == command[7] and len(cmd.split()) == 2: dirname = cmd.split(' ',", "argument.\") if cmd.split(' ', 1)[0] == command[6] and len(cmd.split()) == 2: dirname =", "'debug', 'clear' ] shell = True while shell: cmd = input(prompt) try: if", "', 1)[0] == command[7] and len(cmd.split()) == 1: print(\"E: Missing argument.\") if cmd.split('", "', 1)[1] print(ftp.retrbinary(\"RETR \" + filename, open(str(filename), 'wb').write)) elif cmd.split(' ', 1)[0] ==", "== command[3] and len(cmd.split()) == 2: dirname = cmd.split(' ', 1)[1] print(ftp.cwd(dirname)) elif", "import ftplib import argparse import getpass parser = argparse.ArgumentParser(description='FTP Client.') parser.add_argument('ftp_server') args =", "os import sys from ftplib import FTP import ftplib import argparse import getpass", "except ftplib.all_errors as error: print('FTP error: ', error) if args.ftp_server: try: with FTP(args.ftp_server)", "[ 'help', 'ls', 'pwd', 'cd', 'get', 'send', 'mkdir', 'rmdir', 'delete', 'size', 'debug', 'clear'", "+ filename, open(str(filename), 'wb').write)) elif 
cmd.split(' ', 1)[0] == command[4] and len(cmd.split()) ==", "== command[10] and len(cmd.split()) == 1: print(\"E: Missing argument.\") if cmd == command[11]:", "command[:] all_commands.sort() print(*all_commands[0:6], sep='\\t') print(*all_commands[6:12], sep='\\t') if cmd == command[1]: ftp.dir() if cmd", "cmd.split(' ', 1)[0] == command[10] and len(cmd.split()) == 1: print(\"E: Missing argument.\") if", "== command[6] and len(cmd.split()) == 2: dirname = cmd.split(' ', 1)[1] print(ftp.mkd(dirname)) elif", "Missing argument.\") if cmd.split(' ', 1)[0] == command[8] and len(cmd.split()) == 2: filename", "'mkdir', 'rmdir', 'delete', 'size', 'debug', 'clear' ] shell = True while shell: cmd", "== 2: dirname = cmd.split(' ', 1)[1] print(ftp.cwd(dirname)) elif cmd.split(' ', 1)[0] ==", "print(\"E: Missing argument.\") if cmd.split(' ', 1)[0] == command[7] and len(cmd.split()) == 2:", "argument.\") if cmd.split(' ', 1)[0] == command[5] and len(cmd.split()) == 2: filename =", "parser.parse_args() def cli(prompt, reminder='Please type a valid command'): command = [ 'help', 'ls',", "'size', 'debug', 'clear' ] shell = True while shell: cmd = input(prompt) try:", "1: print(\"E: Missing argument.\") if cmd.split(' ', 1)[0] == command[5] and len(cmd.split()) ==", "print(ftp.retrbinary(\"RETR \" + filename, open(str(filename), 'wb').write)) elif cmd.split(' ', 1)[0] == command[4] and", "print(\"E: Missing argument.\") if cmd.split(' ', 1)[0] == command[10] and len(cmd.split()) == 2:", "cli(prompt, reminder='Please type a valid command'): command = [ 'help', 'ls', 'pwd', 'cd',", "= cmd.split(' ', 1)[1] print(ftp.delete(filename)) elif cmd.split(' ', 1)[0] == command[8] and len(cmd.split())", "argument.\") if cmd.split(' ', 1)[0] == command[8] and len(cmd.split()) == 2: filename =", "= cmd.split(' ', 1)[1] print(ftp.size(filename)) elif cmd.split(' ', 1)[0] == command[9] and len(cmd.split())", "= <PASSWORD>(prompt='Password: ', stream=None) ftp.login(user=username, passwd=password) print(ftp.getwelcome()) cli('ftp> ') except ftplib.all_errors as error:", "1)[1] print(ftp.storbinary(\"STOR \" + filename, open(filename, 'rb'), callback=None)) elif cmd.split(' ', 1)[0] ==", "1)[0] == command[10] and len(cmd.split()) == 2: level = cmd.split(' ', 1)[1] print(ftp.set_debuglevel(int(level)))", "#!/usr/bin/env python3 # -*- coding: utf-8 -*- import os import sys from ftplib", "1)[1] print(ftp.delete(filename)) elif cmd.split(' ', 1)[0] == command[8] and len(cmd.split()) == 1: print(\"E:", "shell = True while shell: cmd = input(prompt) try: if cmd == command[0]:", "== command[8] and len(cmd.split()) == 1: print(\"E: Missing argument.\") if cmd.split(' ', 1)[0]", "sep='\\t') print(*all_commands[6:12], sep='\\t') if cmd == command[1]: ftp.dir() if cmd == command[2]: print(ftp.pwd())", "print(*all_commands[6:12], sep='\\t') if cmd == command[1]: ftp.dir() if cmd == command[2]: print(ftp.pwd()) if", "def cli(prompt, reminder='Please type a valid command'): command = [ 'help', 'ls', 'pwd',", "command and cmd.split(' ', 1)[0] not in ('q', 'quit', 'exit', 'bye'): print(reminder) except", "('q', 'quit', 'exit', 'bye'): print(ftp.quit()) shell = False if len(cmd.split()) and cmd.split(' ',", "1: print(\"E: Missing argument.\") if cmd.split(' ', 1)[0] == command[8] and len(cmd.split()) ==", "('q', 'quit', 'exit', 'bye'): print(reminder) except ftplib.all_errors as error: print('FTP error: ', error)", "and len(cmd.split()) == 2: filename = cmd.split(' ', 1)[1] print(ftp.retrbinary(\"RETR \" + filename,", "command[3] and 
len(cmd.split()) == 1: print(\"E: Missing argument.\") if cmd.split(' ', 1)[0] ==", "and len(cmd.split()) == 1: print(\"E: Missing argument.\") if cmd.split(' ', 1)[0] == command[4]", "= parser.parse_args() def cli(prompt, reminder='Please type a valid command'): command = [ 'help',", "1: print(\"E: Missing argument.\") if cmd.split(' ', 1)[0] == command[9] and len(cmd.split()) ==", "from ftplib import FTP import ftplib import argparse import getpass parser = argparse.ArgumentParser(description='FTP", "command[7] and len(cmd.split()) == 2: dirname = cmd.split(' ', 1)[1] print(ftp.rmd(dirname)) elif cmd.split('", "== 1: print(\"E: Missing argument.\") if cmd.split(' ', 1)[0] == command[5] and len(cmd.split())", "cmd in ('q', 'quit', 'exit', 'bye'): print(ftp.quit()) shell = False if len(cmd.split()) and", "coding: utf-8 -*- import os import sys from ftplib import FTP import ftplib", "1)[0] == command[8] and len(cmd.split()) == 2: filename = cmd.split(' ', 1)[1] print(ftp.delete(filename))", "1)[0] == command[6] and len(cmd.split()) == 1: print(\"E: Missing argument.\") if cmd.split(' ',", "valid command'): command = [ 'help', 'ls', 'pwd', 'cd', 'get', 'send', 'mkdir', 'rmdir',", "if cmd.split(' ', 1)[0] == command[6] and len(cmd.split()) == 2: dirname = cmd.split('", "1)[0] == command[10] and len(cmd.split()) == 1: print(\"E: Missing argument.\") if cmd ==", "\" + filename, open(str(filename), 'wb').write)) elif cmd.split(' ', 1)[0] == command[4] and len(cmd.split())", "', 1)[0] == command[6] and len(cmd.split()) == 1: print(\"E: Missing argument.\") if cmd.split('", "== command[7] and len(cmd.split()) == 2: dirname = cmd.split(' ', 1)[1] print(ftp.rmd(dirname)) elif", "== 1: print(\"E: Missing argument.\") if cmd.split(' ', 1)[0] == command[6] and len(cmd.split())", "== command[11]: os.system(\"clear\") if cmd in ('q', 'quit', 'exit', 'bye'): print(ftp.quit()) shell =", "1)[0] == command[5] and len(cmd.split()) == 2: filename = cmd.split(' ', 1)[1] print(ftp.storbinary(\"STOR", "command[5] and len(cmd.split()) == 1: print(\"E: Missing argument.\") if cmd.split(' ', 1)[0] ==", "== command[0]: all_commands = command[:] all_commands.sort() print(*all_commands[0:6], sep='\\t') print(*all_commands[6:12], sep='\\t') if cmd ==", "cmd == command[11]: os.system(\"clear\") if cmd in ('q', 'quit', 'exit', 'bye'): print(ftp.quit()) shell", "filename = cmd.split(' ', 1)[1] print(ftp.storbinary(\"STOR \" + filename, open(filename, 'rb'), callback=None)) elif", "== 1: print(\"E: Missing argument.\") if cmd.split(' ', 1)[0] == command[8] and len(cmd.split())", "cmd.split(' ', 1)[0] not in command and cmd.split(' ', 1)[0] not in ('q',", "import FTP import ftplib import argparse import getpass parser = argparse.ArgumentParser(description='FTP Client.') parser.add_argument('ftp_server')", "not in command and cmd.split(' ', 1)[0] not in ('q', 'quit', 'exit', 'bye'):", "len(cmd.split()) == 1: print(\"E: Missing argument.\") if cmd.split(' ', 1)[0] == command[8] and", "1: print(\"E: Missing argument.\") if cmd.split(' ', 1)[0] == command[10] and len(cmd.split()) ==", "+ filename, open(filename, 'rb'), callback=None)) elif cmd.split(' ', 1)[0] == command[5] and len(cmd.split())", "cmd.split(' ', 1)[0] not in ('q', 'quit', 'exit', 'bye'): print(reminder) except ftplib.all_errors as", "1)[0] == command[3] and len(cmd.split()) == 1: print(\"E: Missing argument.\") if cmd.split(' ',", "== command[1]: ftp.dir() if cmd == command[2]: print(ftp.pwd()) if cmd.split(' ', 1)[0] ==", "cmd.split(' ', 1)[0] == command[6] and 
len(cmd.split()) == 2: dirname = cmd.split(' ',", "', 1)[0] == command[10] and len(cmd.split()) == 2: level = cmd.split(' ', 1)[1]", "error) if args.ftp_server: try: with FTP(args.ftp_server) as ftp: username = input('Username (%s): '", "== 2: filename = cmd.split(' ', 1)[1] print(ftp.size(filename)) elif cmd.split(' ', 1)[0] ==", "', 1)[1] print(ftp.cwd(dirname)) elif cmd.split(' ', 1)[0] == command[3] and len(cmd.split()) == 1:", "1)[0] == command[9] and len(cmd.split()) == 1: print(\"E: Missing argument.\") if cmd.split(' ',", "reminder='Please type a valid command'): command = [ 'help', 'ls', 'pwd', 'cd', 'get',", "', 1)[0] == command[8] and len(cmd.split()) == 2: filename = cmd.split(' ', 1)[1]", "print(reminder) except ftplib.all_errors as error: print('FTP error: ', error) if args.ftp_server: try: with", "len(cmd.split()) == 1: print(\"E: Missing argument.\") if cmd.split(' ', 1)[0] == command[9] and", "Missing argument.\") if cmd == command[11]: os.system(\"clear\") if cmd in ('q', 'quit', 'exit',", "argument.\") if cmd.split(' ', 1)[0] == command[4] and len(cmd.split()) == 2: filename =", "cmd.split(' ', 1)[1] print(ftp.mkd(dirname)) elif cmd.split(' ', 1)[0] == command[6] and len(cmd.split()) ==", "and cmd.split(' ', 1)[0] not in command and cmd.split(' ', 1)[0] not in", "print(ftp.mkd(dirname)) elif cmd.split(' ', 1)[0] == command[6] and len(cmd.split()) == 1: print(\"E: Missing", "', 1)[0] == command[4] and len(cmd.split()) == 1: print(\"E: Missing argument.\") if cmd.split('", "cmd.split(' ', 1)[0] == command[5] and len(cmd.split()) == 1: print(\"E: Missing argument.\") if", "'send', 'mkdir', 'rmdir', 'delete', 'size', 'debug', 'clear' ] shell = True while shell:", "', 1)[0] == command[5] and len(cmd.split()) == 2: filename = cmd.split(' ', 1)[1]", "command[9] and len(cmd.split()) == 2: filename = cmd.split(' ', 1)[1] print(ftp.size(filename)) elif cmd.split('", "cmd.split(' ', 1)[0] == command[4] and len(cmd.split()) == 1: print(\"E: Missing argument.\") if", "print(ftp.delete(filename)) elif cmd.split(' ', 1)[0] == command[8] and len(cmd.split()) == 1: print(\"E: Missing", "cmd.split(' ', 1)[1] print(ftp.cwd(dirname)) elif cmd.split(' ', 1)[0] == command[3] and len(cmd.split()) ==", "', 1)[0] == command[6] and len(cmd.split()) == 2: dirname = cmd.split(' ', 1)[1]", "elif cmd.split(' ', 1)[0] == command[9] and len(cmd.split()) == 1: print(\"E: Missing argument.\")", "', 1)[0] == command[9] and len(cmd.split()) == 2: filename = cmd.split(' ', 1)[1]", "1)[0] == command[6] and len(cmd.split()) == 2: dirname = cmd.split(' ', 1)[1] print(ftp.mkd(dirname))", "-*- coding: utf-8 -*- import os import sys from ftplib import FTP import", "if cmd.split(' ', 1)[0] == command[5] and len(cmd.split()) == 2: filename = cmd.split('", "FTP import ftplib import argparse import getpass parser = argparse.ArgumentParser(description='FTP Client.') parser.add_argument('ftp_server') args", "and len(cmd.split()) == 1: print(\"E: Missing argument.\") if cmd.split(' ', 1)[0] == command[8]", "1)[0] == command[4] and len(cmd.split()) == 1: print(\"E: Missing argument.\") if cmd.split(' ',", "open(filename, 'rb'), callback=None)) elif cmd.split(' ', 1)[0] == command[5] and len(cmd.split()) == 1:", "1)[0] == command[7] and len(cmd.split()) == 2: dirname = cmd.split(' ', 1)[1] print(ftp.rmd(dirname))", "', 1)[1] print(ftp.set_debuglevel(int(level))) elif cmd.split(' ', 1)[0] == command[10] and len(cmd.split()) == 1:", "command[10] and len(cmd.split()) == 1: print(\"E: Missing argument.\") if cmd == 
command[11]: os.system(\"clear\")", "not in ('q', 'quit', 'exit', 'bye'): print(reminder) except ftplib.all_errors as error: print('FTP error:", "stream=None) ftp.login(user=username, passwd=password) print(ftp.getwelcome()) cli('ftp> ') except ftplib.all_errors as error: print('FTP error: ',", "', 1)[1] print(ftp.rmd(dirname)) elif cmd.split(' ', 1)[0] == command[7] and len(cmd.split()) == 1:", "== 1: print(\"E: Missing argument.\") if cmd.split(' ', 1)[0] == command[7] and len(cmd.split())", "if cmd == command[11]: os.system(\"clear\") if cmd in ('q', 'quit', 'exit', 'bye'): print(ftp.quit())", "command[11]: os.system(\"clear\") if cmd in ('q', 'quit', 'exit', 'bye'): print(ftp.quit()) shell = False", "args.ftp_server ) password = <PASSWORD>(prompt='Password: ', stream=None) ftp.login(user=username, passwd=password) print(ftp.getwelcome()) cli('ftp> ') except", "if cmd in ('q', 'quit', 'exit', 'bye'): print(ftp.quit()) shell = False if len(cmd.split())", "Missing argument.\") if cmd.split(' ', 1)[0] == command[10] and len(cmd.split()) == 2: level", "filename, open(filename, 'rb'), callback=None)) elif cmd.split(' ', 1)[0] == command[5] and len(cmd.split()) ==", "== command[2]: print(ftp.pwd()) if cmd.split(' ', 1)[0] == command[3] and len(cmd.split()) == 2:", "] shell = True while shell: cmd = input(prompt) try: if cmd ==", "2: level = cmd.split(' ', 1)[1] print(ftp.set_debuglevel(int(level))) elif cmd.split(' ', 1)[0] == command[10]", "print(ftp.storbinary(\"STOR \" + filename, open(filename, 'rb'), callback=None)) elif cmd.split(' ', 1)[0] == command[5]", "== command[6] and len(cmd.split()) == 1: print(\"E: Missing argument.\") if cmd.split(' ', 1)[0]", "2: filename = cmd.split(' ', 1)[1] print(ftp.storbinary(\"STOR \" + filename, open(filename, 'rb'), callback=None))", "ftplib import argparse import getpass parser = argparse.ArgumentParser(description='FTP Client.') parser.add_argument('ftp_server') args = parser.parse_args()", "== command[7] and len(cmd.split()) == 1: print(\"E: Missing argument.\") if cmd.split(' ', 1)[0]", "if cmd.split(' ', 1)[0] == command[10] and len(cmd.split()) == 2: level = cmd.split('", "elif cmd.split(' ', 1)[0] == command[4] and len(cmd.split()) == 1: print(\"E: Missing argument.\")", "if len(cmd.split()) and cmd.split(' ', 1)[0] not in command and cmd.split(' ', 1)[0]", "utf-8 -*- import os import sys from ftplib import FTP import ftplib import", "elif cmd.split(' ', 1)[0] == command[5] and len(cmd.split()) == 1: print(\"E: Missing argument.\")", "username = input('Username (%s): ' % args.ftp_server ) password = <PASSWORD>(prompt='Password: ', stream=None)", "cmd.split(' ', 1)[0] == command[8] and len(cmd.split()) == 1: print(\"E: Missing argument.\") if", "== command[8] and len(cmd.split()) == 2: filename = cmd.split(' ', 1)[1] print(ftp.delete(filename)) elif", "cmd.split(' ', 1)[0] == command[7] and len(cmd.split()) == 1: print(\"E: Missing argument.\") if", "1)[0] == command[8] and len(cmd.split()) == 1: print(\"E: Missing argument.\") if cmd.split(' ',", "= command[:] all_commands.sort() print(*all_commands[0:6], sep='\\t') print(*all_commands[6:12], sep='\\t') if cmd == command[1]: ftp.dir() if", "argument.\") if cmd.split(' ', 1)[0] == command[10] and len(cmd.split()) == 2: level =", "= argparse.ArgumentParser(description='FTP Client.') parser.add_argument('ftp_server') args = parser.parse_args() def cli(prompt, reminder='Please type a valid", "and len(cmd.split()) == 2: dirname = cmd.split(' ', 1)[1] print(ftp.rmd(dirname)) elif cmd.split(' ',", 
"cmd.split(' ', 1)[1] print(ftp.size(filename)) elif cmd.split(' ', 1)[0] == command[9] and len(cmd.split()) ==", "and len(cmd.split()) == 1: print(\"E: Missing argument.\") if cmd.split(' ', 1)[0] == command[6]", "1)[0] == command[7] and len(cmd.split()) == 1: print(\"E: Missing argument.\") if cmd.split(' ',", "getpass parser = argparse.ArgumentParser(description='FTP Client.') parser.add_argument('ftp_server') args = parser.parse_args() def cli(prompt, reminder='Please type", "parser = argparse.ArgumentParser(description='FTP Client.') parser.add_argument('ftp_server') args = parser.parse_args() def cli(prompt, reminder='Please type a", "len(cmd.split()) == 2: filename = cmd.split(' ', 1)[1] print(ftp.size(filename)) elif cmd.split(' ', 1)[0]", "sys from ftplib import FTP import ftplib import argparse import getpass parser =", "len(cmd.split()) == 1: print(\"E: Missing argument.\") if cmd.split(' ', 1)[0] == command[10] and", "argument.\") if cmd.split(' ', 1)[0] == command[9] and len(cmd.split()) == 2: filename =", "command[5] and len(cmd.split()) == 2: filename = cmd.split(' ', 1)[1] print(ftp.storbinary(\"STOR \" +", "print(\"E: Missing argument.\") if cmd == command[11]: os.system(\"clear\") if cmd in ('q', 'quit',", "' % args.ftp_server ) password = <PASSWORD>(prompt='Password: ', stream=None) ftp.login(user=username, passwd=password) print(ftp.getwelcome()) cli('ftp>", "input(prompt) try: if cmd == command[0]: all_commands = command[:] all_commands.sort() print(*all_commands[0:6], sep='\\t') print(*all_commands[6:12],", "cmd.split(' ', 1)[0] == command[8] and len(cmd.split()) == 2: filename = cmd.split(' ',", "= False if len(cmd.split()) and cmd.split(' ', 1)[0] not in command and cmd.split('", "'delete', 'size', 'debug', 'clear' ] shell = True while shell: cmd = input(prompt)", "(%s): ' % args.ftp_server ) password = <PASSWORD>(prompt='Password: ', stream=None) ftp.login(user=username, passwd=password) print(ftp.getwelcome())", "if args.ftp_server: try: with FTP(args.ftp_server) as ftp: username = input('Username (%s): ' %", "Missing argument.\") if cmd.split(' ', 1)[0] == command[9] and len(cmd.split()) == 2: filename", "len(cmd.split()) == 2: filename = cmd.split(' ', 1)[1] print(ftp.storbinary(\"STOR \" + filename, open(filename,", "print(ftp.pwd()) if cmd.split(' ', 1)[0] == command[3] and len(cmd.split()) == 2: dirname =", "== command[5] and len(cmd.split()) == 1: print(\"E: Missing argument.\") if cmd.split(' ', 1)[0]", "1: print(\"E: Missing argument.\") if cmd.split(' ', 1)[0] == command[4] and len(cmd.split()) ==", "command[8] and len(cmd.split()) == 1: print(\"E: Missing argument.\") if cmd.split(' ', 1)[0] ==", "print(ftp.set_debuglevel(int(level))) elif cmd.split(' ', 1)[0] == command[10] and len(cmd.split()) == 1: print(\"E: Missing", "= input('Username (%s): ' % args.ftp_server ) password = <PASSWORD>(prompt='Password: ', stream=None) ftp.login(user=username,", "parser.add_argument('ftp_server') args = parser.parse_args() def cli(prompt, reminder='Please type a valid command'): command =", "'cd', 'get', 'send', 'mkdir', 'rmdir', 'delete', 'size', 'debug', 'clear' ] shell = True", "filename = cmd.split(' ', 1)[1] print(ftp.retrbinary(\"RETR \" + filename, open(str(filename), 'wb').write)) elif cmd.split('", "== 1: print(\"E: Missing argument.\") if cmd.split(' ', 1)[0] == command[10] and len(cmd.split())", "command[10] and len(cmd.split()) == 2: level = cmd.split(' ', 1)[1] print(ftp.set_debuglevel(int(level))) elif cmd.split('", "filename, open(str(filename), 
'wb').write)) elif cmd.split(' ', 1)[0] == command[4] and len(cmd.split()) == 1:", "all_commands.sort() print(*all_commands[0:6], sep='\\t') print(*all_commands[6:12], sep='\\t') if cmd == command[1]: ftp.dir() if cmd ==", "== command[4] and len(cmd.split()) == 1: print(\"E: Missing argument.\") if cmd.split(' ', 1)[0]", "', 1)[1] print(ftp.storbinary(\"STOR \" + filename, open(filename, 'rb'), callback=None)) elif cmd.split(' ', 1)[0]", "len(cmd.split()) == 1: print(\"E: Missing argument.\") if cmd.split(' ', 1)[0] == command[6] and", "== 2: filename = cmd.split(' ', 1)[1] print(ftp.delete(filename)) elif cmd.split(' ', 1)[0] ==", "in ('q', 'quit', 'exit', 'bye'): print(reminder) except ftplib.all_errors as error: print('FTP error: ',", "= cmd.split(' ', 1)[1] print(ftp.retrbinary(\"RETR \" + filename, open(str(filename), 'wb').write)) elif cmd.split(' ',", "-*- import os import sys from ftplib import FTP import ftplib import argparse", "Missing argument.\") if cmd.split(' ', 1)[0] == command[6] and len(cmd.split()) == 2: dirname", "False if len(cmd.split()) and cmd.split(' ', 1)[0] not in command and cmd.split(' ',", "while shell: cmd = input(prompt) try: if cmd == command[0]: all_commands = command[:]", "== command[5] and len(cmd.split()) == 2: filename = cmd.split(' ', 1)[1] print(ftp.storbinary(\"STOR \"", "in ('q', 'quit', 'exit', 'bye'): print(ftp.quit()) shell = False if len(cmd.split()) and cmd.split('", "== 2: level = cmd.split(' ', 1)[1] print(ftp.set_debuglevel(int(level))) elif cmd.split(' ', 1)[0] ==", "command[0]: all_commands = command[:] all_commands.sort() print(*all_commands[0:6], sep='\\t') print(*all_commands[6:12], sep='\\t') if cmd == command[1]:", "== 1: print(\"E: Missing argument.\") if cmd.split(' ', 1)[0] == command[9] and len(cmd.split())", "filename = cmd.split(' ', 1)[1] print(ftp.delete(filename)) elif cmd.split(' ', 1)[0] == command[8] and", "1)[0] == command[4] and len(cmd.split()) == 2: filename = cmd.split(' ', 1)[1] print(ftp.retrbinary(\"RETR", "2: dirname = cmd.split(' ', 1)[1] print(ftp.cwd(dirname)) elif cmd.split(' ', 1)[0] == command[3]", "= input(prompt) try: if cmd == command[0]: all_commands = command[:] all_commands.sort() print(*all_commands[0:6], sep='\\t')", "= cmd.split(' ', 1)[1] print(ftp.storbinary(\"STOR \" + filename, open(filename, 'rb'), callback=None)) elif cmd.split('", "as ftp: username = input('Username (%s): ' % args.ftp_server ) password = <PASSWORD>(prompt='Password:", "print(\"E: Missing argument.\") if cmd.split(' ', 1)[0] == command[5] and len(cmd.split()) == 2:", "len(cmd.split()) == 2: dirname = cmd.split(' ', 1)[1] print(ftp.mkd(dirname)) elif cmd.split(' ', 1)[0]", "error: print('FTP error: ', error) if args.ftp_server: try: with FTP(args.ftp_server) as ftp: username", "1)[0] == command[9] and len(cmd.split()) == 2: filename = cmd.split(' ', 1)[1] print(ftp.size(filename))", "len(cmd.split()) == 2: level = cmd.split(' ', 1)[1] print(ftp.set_debuglevel(int(level))) elif cmd.split(' ', 1)[0]", "argument.\") if cmd == command[11]: os.system(\"clear\") if cmd in ('q', 'quit', 'exit', 'bye'):", "and len(cmd.split()) == 1: print(\"E: Missing argument.\") if cmd.split(' ', 1)[0] == command[10]", "print(ftp.quit()) shell = False if len(cmd.split()) and cmd.split(' ', 1)[0] not in command", "', stream=None) ftp.login(user=username, passwd=password) print(ftp.getwelcome()) cli('ftp> ') except ftplib.all_errors as error: print('FTP error:", "dirname = cmd.split(' ', 1)[1] print(ftp.mkd(dirname)) elif cmd.split(' ', 1)[0] == 
command[6] and", "import sys from ftplib import FTP import ftplib import argparse import getpass parser", "command'): command = [ 'help', 'ls', 'pwd', 'cd', 'get', 'send', 'mkdir', 'rmdir', 'delete',", "cmd.split(' ', 1)[1] print(ftp.delete(filename)) elif cmd.split(' ', 1)[0] == command[8] and len(cmd.split()) ==", "= cmd.split(' ', 1)[1] print(ftp.mkd(dirname)) elif cmd.split(' ', 1)[0] == command[6] and len(cmd.split())", "2: dirname = cmd.split(' ', 1)[1] print(ftp.mkd(dirname)) elif cmd.split(' ', 1)[0] == command[6]", "Missing argument.\") if cmd.split(' ', 1)[0] == command[4] and len(cmd.split()) == 2: filename", "len(cmd.split()) and cmd.split(' ', 1)[0] not in command and cmd.split(' ', 1)[0] not", "ftplib import FTP import ftplib import argparse import getpass parser = argparse.ArgumentParser(description='FTP Client.')", "== command[9] and len(cmd.split()) == 2: filename = cmd.split(' ', 1)[1] print(ftp.size(filename)) elif", "and len(cmd.split()) == 2: filename = cmd.split(' ', 1)[1] print(ftp.storbinary(\"STOR \" + filename,", "cmd.split(' ', 1)[1] print(ftp.set_debuglevel(int(level))) elif cmd.split(' ', 1)[0] == command[10] and len(cmd.split()) ==", "= True while shell: cmd = input(prompt) try: if cmd == command[0]: all_commands", "1)[0] not in command and cmd.split(' ', 1)[0] not in ('q', 'quit', 'exit',", "print(ftp.size(filename)) elif cmd.split(' ', 1)[0] == command[9] and len(cmd.split()) == 1: print(\"E: Missing", "command[9] and len(cmd.split()) == 1: print(\"E: Missing argument.\") if cmd.split(' ', 1)[0] ==", "== command[3] and len(cmd.split()) == 1: print(\"E: Missing argument.\") if cmd.split(' ', 1)[0]", "'help', 'ls', 'pwd', 'cd', 'get', 'send', 'mkdir', 'rmdir', 'delete', 'size', 'debug', 'clear' ]", "print(\"E: Missing argument.\") if cmd.split(' ', 1)[0] == command[4] and len(cmd.split()) == 2:", "and cmd.split(' ', 1)[0] not in ('q', 'quit', 'exit', 'bye'): print(reminder) except ftplib.all_errors", "', 1)[1] print(ftp.delete(filename)) elif cmd.split(' ', 1)[0] == command[8] and len(cmd.split()) == 1:", "len(cmd.split()) == 1: print(\"E: Missing argument.\") if cmd.split(' ', 1)[0] == command[5] and", "cmd.split(' ', 1)[0] == command[9] and len(cmd.split()) == 2: filename = cmd.split(' ',", "== 2: dirname = cmd.split(' ', 1)[1] print(ftp.rmd(dirname)) elif cmd.split(' ', 1)[0] ==", "'exit', 'bye'): print(reminder) except ftplib.all_errors as error: print('FTP error: ', error) if args.ftp_server:", "= [ 'help', 'ls', 'pwd', 'cd', 'get', 'send', 'mkdir', 'rmdir', 'delete', 'size', 'debug',", "'clear' ] shell = True while shell: cmd = input(prompt) try: if cmd", "', 1)[0] == command[7] and len(cmd.split()) == 2: dirname = cmd.split(' ', 1)[1]", "1)[1] print(ftp.size(filename)) elif cmd.split(' ', 1)[0] == command[9] and len(cmd.split()) == 1: print(\"E:", "cmd.split(' ', 1)[1] print(ftp.storbinary(\"STOR \" + filename, open(filename, 'rb'), callback=None)) elif cmd.split(' ',", "cmd.split(' ', 1)[0] == command[3] and len(cmd.split()) == 2: dirname = cmd.split(' ',", "FTP(args.ftp_server) as ftp: username = input('Username (%s): ' % args.ftp_server ) password =", "cmd.split(' ', 1)[0] == command[9] and len(cmd.split()) == 1: print(\"E: Missing argument.\") if", "cmd == command[2]: print(ftp.pwd()) if cmd.split(' ', 1)[0] == command[3] and len(cmd.split()) ==", "input('Username (%s): ' % args.ftp_server ) password = <PASSWORD>(prompt='Password: ', stream=None) ftp.login(user=username, passwd=password)", "command[8] and len(cmd.split()) == 2: filename = 
cmd.split(' ', 1)[1] print(ftp.delete(filename)) elif cmd.split('", "ftp: username = input('Username (%s): ' % args.ftp_server ) password = <PASSWORD>(prompt='Password: ',", "command[1]: ftp.dir() if cmd == command[2]: print(ftp.pwd()) if cmd.split(' ', 1)[0] == command[3]", "', error) if args.ftp_server: try: with FTP(args.ftp_server) as ftp: username = input('Username (%s):", "python3 # -*- coding: utf-8 -*- import os import sys from ftplib import", "'get', 'send', 'mkdir', 'rmdir', 'delete', 'size', 'debug', 'clear' ] shell = True while", "1)[1] print(ftp.rmd(dirname)) elif cmd.split(' ', 1)[0] == command[7] and len(cmd.split()) == 1: print(\"E:", "2: filename = cmd.split(' ', 1)[1] print(ftp.delete(filename)) elif cmd.split(' ', 1)[0] == command[8]", "== 2: dirname = cmd.split(' ', 1)[1] print(ftp.mkd(dirname)) elif cmd.split(' ', 1)[0] ==", "if cmd.split(' ', 1)[0] == command[9] and len(cmd.split()) == 2: filename = cmd.split('", "command[4] and len(cmd.split()) == 1: print(\"E: Missing argument.\") if cmd.split(' ', 1)[0] ==", "and len(cmd.split()) == 2: filename = cmd.split(' ', 1)[1] print(ftp.delete(filename)) elif cmd.split(' ',", "'bye'): print(ftp.quit()) shell = False if len(cmd.split()) and cmd.split(' ', 1)[0] not in", "ftplib.all_errors as error: print('FTP error: ', error) if args.ftp_server: try: with FTP(args.ftp_server) as", "1)[1] print(ftp.cwd(dirname)) elif cmd.split(' ', 1)[0] == command[3] and len(cmd.split()) == 1: print(\"E:", "if cmd.split(' ', 1)[0] == command[7] and len(cmd.split()) == 2: dirname = cmd.split('", "1)[1] print(ftp.retrbinary(\"RETR \" + filename, open(str(filename), 'wb').write)) elif cmd.split(' ', 1)[0] == command[4]", "print(ftp.rmd(dirname)) elif cmd.split(' ', 1)[0] == command[7] and len(cmd.split()) == 1: print(\"E: Missing", "and len(cmd.split()) == 1: print(\"E: Missing argument.\") if cmd.split(' ', 1)[0] == command[5]", "2: filename = cmd.split(' ', 1)[1] print(ftp.size(filename)) elif cmd.split(' ', 1)[0] == command[9]", "password = <PASSWORD>(prompt='Password: ', stream=None) ftp.login(user=username, passwd=password) print(ftp.getwelcome()) cli('ftp> ') except ftplib.all_errors as", "cmd.split(' ', 1)[0] == command[4] and len(cmd.split()) == 2: filename = cmd.split(' ',", "', 1)[1] print(ftp.size(filename)) elif cmd.split(' ', 1)[0] == command[9] and len(cmd.split()) == 1:", "import getpass parser = argparse.ArgumentParser(description='FTP Client.') parser.add_argument('ftp_server') args = parser.parse_args() def cli(prompt, reminder='Please", "and len(cmd.split()) == 1: print(\"E: Missing argument.\") if cmd == command[11]: os.system(\"clear\") if", "== 2: filename = cmd.split(' ', 1)[1] print(ftp.retrbinary(\"RETR \" + filename, open(str(filename), 'wb').write))", "<PASSWORD>(prompt='Password: ', stream=None) ftp.login(user=username, passwd=password) print(ftp.getwelcome()) cli('ftp> ') except ftplib.all_errors as error: print('FTP", "as error: print('FTP error: ', error) if args.ftp_server: try: with FTP(args.ftp_server) as ftp:", "argparse.ArgumentParser(description='FTP Client.') parser.add_argument('ftp_server') args = parser.parse_args() def cli(prompt, reminder='Please type a valid command'):", "in command and cmd.split(' ', 1)[0] not in ('q', 'quit', 'exit', 'bye'): print(reminder)", "with FTP(args.ftp_server) as ftp: username = input('Username (%s): ' % args.ftp_server ) password", "', 1)[1] print(ftp.mkd(dirname)) elif cmd.split(' ', 1)[0] == command[6] and len(cmd.split()) == 1:", "', 1)[0] not in command and cmd.split(' 
', 1)[0] not in ('q', 'quit',", "elif cmd.split(' ', 1)[0] == command[8] and len(cmd.split()) == 1: print(\"E: Missing argument.\")", "cmd == command[0]: all_commands = command[:] all_commands.sort() print(*all_commands[0:6], sep='\\t') print(*all_commands[6:12], sep='\\t') if cmd", "if cmd.split(' ', 1)[0] == command[4] and len(cmd.split()) == 2: filename = cmd.split('", "== 2: filename = cmd.split(' ', 1)[1] print(ftp.storbinary(\"STOR \" + filename, open(filename, 'rb'),", "shell = False if len(cmd.split()) and cmd.split(' ', 1)[0] not in command and", "try: if cmd == command[0]: all_commands = command[:] all_commands.sort() print(*all_commands[0:6], sep='\\t') print(*all_commands[6:12], sep='\\t')", "shell: cmd = input(prompt) try: if cmd == command[0]: all_commands = command[:] all_commands.sort()", "2: filename = cmd.split(' ', 1)[1] print(ftp.retrbinary(\"RETR \" + filename, open(str(filename), 'wb').write)) elif", "cmd == command[1]: ftp.dir() if cmd == command[2]: print(ftp.pwd()) if cmd.split(' ', 1)[0]", ") password = <PASSWORD>(prompt='Password: ', stream=None) ftp.login(user=username, passwd=password) print(ftp.getwelcome()) cli('ftp> ') except ftplib.all_errors", "len(cmd.split()) == 2: dirname = cmd.split(' ', 1)[1] print(ftp.rmd(dirname)) elif cmd.split(' ', 1)[0]", "len(cmd.split()) == 2: filename = cmd.split(' ', 1)[1] print(ftp.retrbinary(\"RETR \" + filename, open(str(filename),", "command[7] and len(cmd.split()) == 1: print(\"E: Missing argument.\") if cmd.split(' ', 1)[0] ==", "elif cmd.split(' ', 1)[0] == command[6] and len(cmd.split()) == 1: print(\"E: Missing argument.\")", "== 1: print(\"E: Missing argument.\") if cmd.split(' ', 1)[0] == command[4] and len(cmd.split())", "argparse import getpass parser = argparse.ArgumentParser(description='FTP Client.') parser.add_argument('ftp_server') args = parser.parse_args() def cli(prompt,", "print(ftp.cwd(dirname)) elif cmd.split(' ', 1)[0] == command[3] and len(cmd.split()) == 1: print(\"E: Missing", "', 1)[0] == command[4] and len(cmd.split()) == 2: filename = cmd.split(' ', 1)[1]", "command[2]: print(ftp.pwd()) if cmd.split(' ', 1)[0] == command[3] and len(cmd.split()) == 2: dirname", "Client.') parser.add_argument('ftp_server') args = parser.parse_args() def cli(prompt, reminder='Please type a valid command'): command", "', 1)[0] == command[3] and len(cmd.split()) == 2: dirname = cmd.split(' ', 1)[1]", "command = [ 'help', 'ls', 'pwd', 'cd', 'get', 'send', 'mkdir', 'rmdir', 'delete', 'size',", "callback=None)) elif cmd.split(' ', 1)[0] == command[5] and len(cmd.split()) == 1: print(\"E: Missing", "cmd.split(' ', 1)[0] == command[5] and len(cmd.split()) == 2: filename = cmd.split(' ',", "1)[0] == command[3] and len(cmd.split()) == 2: dirname = cmd.split(' ', 1)[1] print(ftp.cwd(dirname))", "'quit', 'exit', 'bye'): print(ftp.quit()) shell = False if len(cmd.split()) and cmd.split(' ', 1)[0]", "', 1)[0] == command[8] and len(cmd.split()) == 1: print(\"E: Missing argument.\") if cmd.split('", "ftp.login(user=username, passwd=password) print(ftp.getwelcome()) cli('ftp> ') except ftplib.all_errors as error: print('FTP error: ', error)", "', 1)[0] == command[5] and len(cmd.split()) == 1: print(\"E: Missing argument.\") if cmd.split('", "args = parser.parse_args() def cli(prompt, reminder='Please type a valid command'): command = [", "== command[4] and len(cmd.split()) == 2: filename = cmd.split(' ', 1)[1] print(ftp.retrbinary(\"RETR \"", "'ls', 'pwd', 'cd', 'get', 'send', 'mkdir', 'rmdir', 'delete', 'size', 'debug', 'clear' ] 
shell", "cmd.split(' ', 1)[0] == command[3] and len(cmd.split()) == 1: print(\"E: Missing argument.\") if", "Missing argument.\") if cmd.split(' ', 1)[0] == command[7] and len(cmd.split()) == 2: dirname", "dirname = cmd.split(' ', 1)[1] print(ftp.rmd(dirname)) elif cmd.split(' ', 1)[0] == command[7] and", "os.system(\"clear\") if cmd in ('q', 'quit', 'exit', 'bye'): print(ftp.quit()) shell = False if", "', 1)[0] == command[9] and len(cmd.split()) == 1: print(\"E: Missing argument.\") if cmd.split('", "sep='\\t') if cmd == command[1]: ftp.dir() if cmd == command[2]: print(ftp.pwd()) if cmd.split('", "print(\"E: Missing argument.\") if cmd.split(' ', 1)[0] == command[9] and len(cmd.split()) == 2:", "ftp.dir() if cmd == command[2]: print(ftp.pwd()) if cmd.split(' ', 1)[0] == command[3] and", "Missing argument.\") if cmd.split(' ', 1)[0] == command[5] and len(cmd.split()) == 2: filename", "dirname = cmd.split(' ', 1)[1] print(ftp.cwd(dirname)) elif cmd.split(' ', 1)[0] == command[3] and", "= cmd.split(' ', 1)[1] print(ftp.cwd(dirname)) elif cmd.split(' ', 1)[0] == command[3] and len(cmd.split())", "try: with FTP(args.ftp_server) as ftp: username = input('Username (%s): ' % args.ftp_server )", "if cmd.split(' ', 1)[0] == command[8] and len(cmd.split()) == 2: filename = cmd.split('", "'rmdir', 'delete', 'size', 'debug', 'clear' ] shell = True while shell: cmd =", "= cmd.split(' ', 1)[1] print(ftp.rmd(dirname)) elif cmd.split(' ', 1)[0] == command[7] and len(cmd.split())", "1: print(\"E: Missing argument.\") if cmd.split(' ', 1)[0] == command[6] and len(cmd.split()) ==", "', 1)[0] == command[10] and len(cmd.split()) == 1: print(\"E: Missing argument.\") if cmd", "'quit', 'exit', 'bye'): print(reminder) except ftplib.all_errors as error: print('FTP error: ', error) if", "print(\"E: Missing argument.\") if cmd.split(' ', 1)[0] == command[6] and len(cmd.split()) == 2:", "level = cmd.split(' ', 1)[1] print(ftp.set_debuglevel(int(level))) elif cmd.split(' ', 1)[0] == command[10] and", "filename = cmd.split(' ', 1)[1] print(ftp.size(filename)) elif cmd.split(' ', 1)[0] == command[9] and", "\" + filename, open(filename, 'rb'), callback=None)) elif cmd.split(' ', 1)[0] == command[5] and", "import os import sys from ftplib import FTP import ftplib import argparse import", "and len(cmd.split()) == 2: dirname = cmd.split(' ', 1)[1] print(ftp.mkd(dirname)) elif cmd.split(' ',", "len(cmd.split()) == 1: print(\"E: Missing argument.\") if cmd == command[11]: os.system(\"clear\") if cmd", "command[4] and len(cmd.split()) == 2: filename = cmd.split(' ', 1)[1] print(ftp.retrbinary(\"RETR \" +" ]
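The row above shreds what looks like a small ftplib command-line client. The condensed sketch below is assembled from the quoted fragments as a reading aid; the command dispatch is restructured for brevity (the original repeats a cmd.split(' ', 1) pattern per command), and getpass.getpass is an assumption, since the shingles redact that call as <PASSWORD>.

#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import getpass
import argparse
import ftplib
from ftplib import FTP

parser = argparse.ArgumentParser(description='FTP Client.')
parser.add_argument('ftp_server')
args = parser.parse_args()


def cli(prompt, reminder='Please type a valid command'):
    command = ['help', 'ls', 'pwd', 'cd', 'get', 'send',
               'mkdir', 'rmdir', 'delete', 'size', 'debug', 'clear']
    shell = True
    while shell:
        cmd = input(prompt)
        name, _, arg = cmd.partition(' ')  # condensed form of the repeated cmd.split(' ', 1)
        try:
            if name == 'help':
                print(*sorted(command), sep='\t')
            elif name == 'ls':
                ftp.dir()
            elif name == 'pwd':
                print(ftp.pwd())
            elif name in command[3:11] and not arg:
                print("E: Missing argument.")
            elif name == 'cd':
                print(ftp.cwd(arg))
            elif name == 'get':
                print(ftp.retrbinary("RETR " + arg, open(arg, 'wb').write))
            elif name == 'send':
                print(ftp.storbinary("STOR " + arg, open(arg, 'rb'), callback=None))
            elif name == 'mkdir':
                print(ftp.mkd(arg))
            elif name == 'rmdir':
                print(ftp.rmd(arg))
            elif name == 'delete':
                print(ftp.delete(arg))
            elif name == 'size':
                print(ftp.size(arg))
            elif name == 'debug':
                print(ftp.set_debuglevel(int(arg)))
            elif name == 'clear':
                os.system("clear")
            if cmd in ('q', 'quit', 'exit', 'bye'):
                print(ftp.quit())
                shell = False
            elif cmd and name not in command and name not in ('q', 'quit', 'exit', 'bye'):
                print(reminder)
        except ftplib.all_errors as error:
            print('FTP error: ', error)


if args.ftp_server:
    try:
        with FTP(args.ftp_server) as ftp:
            username = input('Username (%s): ' % args.ftp_server)
            # The shingles redact the password prompt as <PASSWORD>; getpass is assumed.
            password = getpass.getpass(prompt='Password: ', stream=None)
            ftp.login(user=username, passwd=password)
            print(ftp.getwelcome())
            cli('ftp> ')
    except ftplib.all_errors as error:
        print('FTP error: ', error)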
[ "diropen, path) path = os.path.join(temp_dir, 'dir*') def diropen(path): return [f for f in", "# base file that exists path = os.path.join(temp_fd.name, 'foo') self.true(s_filepath.exists(path)) self.true(s_filepath.isfile(path)) # file", "os.path.join(temp_fd.name, 'dne') self.false(s_filepath.exists(path)) self.false(s_filepath.isfile(path)) self.false(s_filepath.isdir(path)) temp_fd.close() nested_temp_fd.close() def test_filepath_zip_open(self): temp_fd = tempfile.NamedTemporaryFile() zip_fd", "20) zfd1.write(tzfd0.name, arcname='ndir0/nested.zip') zfd1.write(ttfd0.name, arcname='ndir0/nested.tar') zfd1.close() path = os.path.join(tzfd1.name, 'dir0/dir1/dir2/bar*') count = 0", "'bar') self.true(s_filepath.exists(path)) self.true(s_filepath.isfile(path)) # nested dir that exists path = os.path.join(temp_fd.name, 'dir0', 'dir1',", "mode='rb') self.eq(fd0.read(), fd1.read()) fd0.close() fd1.close() # open inner zip file path = os.path.join(temp_fd.name,", "# multiple open zip files tzfd0 = open(os.path.join(temp_dir, 'baz.zip'), 'w') tzfd1 = tempfile.NamedTemporaryFile()", "shutil.rmtree(temp_dir) def test_filepath_zip(self): temp_fd = tempfile.NamedTemporaryFile() nested_temp_fd = tempfile.NamedTemporaryFile() zip_fd = zipfile.ZipFile(temp_fd.name, 'w')", "exists in a tar path = getTestPath('nest2.tar', 'nndir0', 'nnbar') self.true(s_filepath.exists(path)) self.true(s_filepath.isfile(path)) # container", "os.mkdir(os.path.join(temp_dir, 'dir0')) os.mkdir(os.path.join(temp_dir, 'dir1')) os.mkdir(os.path.join(temp_dir, 'dir2')) os.mkdir(os.path.join(temp_dir, 'fooD')) os.makedirs(os.path.join(temp_dir, 'bazer', 'eir0', 'dir1')) os.makedirs(os.path.join(temp_dir,", "import io import shutil import tarfile import zipfile import tempfile from synapse.tests.common import", "% (temp_fd.name, '_DNE'), mode='rb') self.raises(s_exc.NoSuchPath, s_filepath.openfile, None) self.raises(s_exc.NoSuchPath, s_filepath.openfile, '') self.none(s_filepath.openfile(None, req=False)) #", "fd = s_filepath.openfile(path, mode='rb') fs_fd = open(getTestPath('nest1.tar'), 'rb') self.eq(fd.read(), fs_fd.read()) fs_fd.close() fd.close() #", "= os.path.join(temp_dir, 'dir*') def diropen(path): [f for f in s_filepath.openfiles(path, mode='rb', req=True)] self.raises(s_exc.NoSuchPath,", "= os.path.join(temp_dir, 'foo0') with open(f0_path, 'wb') as fd: fd.write(f0) f1 = b'B' *", "20) self.eq(buf, t1) fd.close() count += 1 self.eq(count, 6) tzfd0.close() tzfd1.close() ttfd0.close() shutil.rmtree(temp_dir)", "fbuf) fd.close() # dne path self.raises(s_exc.NoSuchPath, s_filepath.openfile, '%s%s' % (temp_fd.name, '_DNE'), mode='rb') self.raises(s_exc.NoSuchPath,", "= os.path.join(tzfd1.name, 'dir0/dir1/dir2/bar*') count = 0 for fd in s_filepath.openfiles(path, mode='rb'): buf =", "# DNE base path = os.path.join(temp_fd.name, 'dne') self.false(s_filepath.exists(path)) self.false(s_filepath.isfile(path)) self.false(s_filepath.isdir(path)) temp_fd.close() nested_temp_fd.close() def", "# container nested base directory that exists path = getTestPath('nest2.tar', 'nndir0', 'nndir1', 'nest1.tar',", "exists path = os.path.join(nested_temp_fd.name, 'ndir0', 'nested.zip', 'dir0') self.true(s_filepath.exists(path)) self.true(s_filepath.isdir(path)) # container nested file", "exists in a directory path = os.path.join(temp_fd.name, 'dir0', 'bar') self.true(s_filepath.exists(path)) self.true(s_filepath.isfile(path)) # nested", "inner zip file path = os.path.join(temp_fd.name, 'dir0', 'foo') fd = s_filepath.openfile(path, mode='r') 
self.eq(fd.read(),", "* 20) zip_fd.close() zip_fd = zipfile.ZipFile(nested_temp_fd.name, 'w') zip_fd.writestr('aaa', 'A' * 20) zip_fd.writestr('ndir0/bbb', 'A'", "'foo2') with open(f2_path, 'wb') as fd: fd.write(f2) f3 = b'Z' * 20 f3_path", "mode='rb'): buf = fd.read() fd.seek(0) self.eq(len(buf), 20) self.eq(buf, fd.read()) self.isin(buf, [f0, f1, f2])", "'w') zfd1.writestr('dir0/dir1/dir2/bar0', f0) zfd1.writestr('dir0/dir1/dir2/bar1', f1) zfd1.writestr('dir0/dir1/dir2/bar2', f2) zfd1.writestr('dir0/dir1/dir2/junk', 'Z' * 20) zfd1.write(tzfd0.name, arcname='ndir0/nested.zip')", "f1, f2]) fd.close() count += 1 self.eq(count, 3) path = os.path.join(temp_dir, 'baz*', 'eir0',", "os.path.join(temp_fd.name, 'dir0', 'dir1', 'dir2') self.true(s_filepath.isdir(path)) # DNE in a real directory path =", "b'A' * 20 zip_fd.writestr('dir0/foo', abuf) fbuf2 = 'B' * 20 zip_fd.writestr('bar', fbuf2) zip_fd.close()", "req=True)] self.raises(s_exc.NoSuchPath, diropen, path) ret = [a for a in s_filepath.openfiles(None)] self.eq([], ret)", "'nndir0', 'nndir1', 'nest1.tar', 'ndir0', 'dne') self.false(s_filepath.exists(path)) self.false(s_filepath.isdir(path)) self.false(s_filepath.isfile(path)) # DNE file in a", "path that DNE path = getTestPath('nest2.tar', 'nndir0', 'nndir1', 'nest1.tar', 'ndir0', 'dne') self.false(s_filepath.exists(path)) self.false(s_filepath.isdir(path))", "'wb') as fd: fd.write(t1) t_path = os.path.join(temp_dir, 'bazr') with open(t_path, 'wb') as fd:", "TestFilePath(SynTest): def test_filepath_glob(self): temp_dir = tempfile.mkdtemp() os.mkdir(os.path.join(temp_dir, 'dir0')) os.mkdir(os.path.join(temp_dir, 'dir1')) os.mkdir(os.path.join(temp_dir, 'dir2')) os.mkdir(os.path.join(temp_dir,", "self.raises(s_exc.NoSuchPath, diropen, path) path = os.path.join(temp_dir, 'dir*') def diropen(path): return [f for f", "tempfile.NamedTemporaryFile() zip_fd = zipfile.ZipFile(temp_fd.name, 'w') zip_fd.writestr('foo', 'A' * 20) zip_fd.writestr('dir0/bar', 'A' * 20)", "b'a' * 20 t_path = os.path.join(temp_dir, 'bazer', 'eir0', 'dir1', 'alpha') with open(t_path, 'wb')", "a real directory path = os.path.join(temp_fd.name, 'dir0', 'dne') self.false(s_filepath.exists(path)) self.false(s_filepath.isfile(path)) self.false(s_filepath.isdir(path)) # DNE", "path = os.path.join(temp_dir, 'foo*') self.false(s_filepath.exists(path)) self.false(s_filepath.isfile(path)) self.false(s_filepath.isdir(path)) # dirs that exist path =", "zipfile.ZipFile(temp_fd.name, 'w') zip_fd.writestr('foo', 'A' * 20) zip_fd.writestr('dir0/bar', 'A' * 20) zip_fd.writestr('dir0/dir1/dir2/baz', 'C' *", "'rb') self.eq(fd.read(), fs_fd.read()) fs_fd.close() fd.close() # open inner tar file path = getTestPath('nest2.tar',", "base directory that exists path = os.path.join(temp_fd.name, 'dir0') self.true(s_filepath.exists(path)) self.true(s_filepath.isdir(path)) # container nested", "# DNE base path = getTestPath('nest2.tar', 'dne') self.false(s_filepath.exists(path)) self.false(s_filepath.isfile(path)) self.false(s_filepath.isdir(path)) def test_filepath_tar_open(self): #", "as s_filepath class TestFilePath(SynTest): def test_filepath_glob(self): temp_dir = tempfile.mkdtemp() os.mkdir(os.path.join(temp_dir, 'dir0')) os.mkdir(os.path.join(temp_dir, 'dir1'))", "* 20 zip_fd.writestr('dir0/foo', abuf) fbuf2 = 'B' * 20 zip_fd.writestr('bar', fbuf2) zip_fd.close() #", "open inner zip file path = os.path.join(temp_fd.name, 'dir0', 'foo') fd = s_filepath.openfile(path, mode='r')", "def test_filepath_zip_open(self): temp_fd = 
tempfile.NamedTemporaryFile() zip_fd = zipfile.ZipFile(temp_fd.name, 'w') abuf = 'A' *", "'dir0')) os.mkdir(os.path.join(temp_dir, 'dir1')) os.mkdir(os.path.join(temp_dir, 'dir2')) os.mkdir(os.path.join(temp_dir, 'fooD')) os.makedirs(os.path.join(temp_dir, 'bazer', 'eir0', 'dir1')) os.makedirs(os.path.join(temp_dir, 'bazer',", "self.none(s_filepath.openfile(None, req=False)) # open a directory self.none(s_filepath.openfile('/tmp', mode='rb', req=False)) self.none(s_filepath.openfile('/', req=False)) temp_fd.close() shutil.rmtree(temp_dir)", "os.path.join(temp_fd.name, 'dir0', 'foo') fd = s_filepath.openfile(path, mode='r') self.eq(fd.read(), bbuf) fd.close() temp_fd.close() temp_fd.close() def", "open inner tar file path = getTestPath('nest2.tar', 'nndir0', 'nndir1', 'nest1.tar') fd = s_filepath.openfile(path,", "dirs that exist path = os.path.join(temp_dir, 'dir*') self.false(s_filepath.exists(path)) self.false(s_filepath.isfile(path)) self.false(s_filepath.isdir(path)) # open a", "nested path that DNE path = getTestPath('nest2.tar', 'nndir0', 'nndir1', 'nest1.tar', 'ndir0', 'dne') self.false(s_filepath.exists(path))", "# multiple open on dne path = os.path.join(temp_dir, 'dne*') def diropen(path): return [f", "as fd: fd.write(t1) t_path = os.path.join(temp_dir, 'bazer', 'eir0', 'dir1', 'bravo') with open(t_path, 'wb')", "self.false(s_filepath.isfile(path)) self.false(s_filepath.isdir(path)) # DNE base path = os.path.join(temp_fd.name, 'dne') self.false(s_filepath.exists(path)) self.false(s_filepath.isfile(path)) self.false(s_filepath.isdir(path)) temp_fd.close()", "open(os.path.join(temp_dir, 'baz.tar'), 'w') zfd0 = zipfile.ZipFile(tzfd0.name, 'w') zfd0.writestr('dir0/dir1/dir2/foo0', f0) zfd0.writestr('dir0/dir1/dir2/foo1', f1) zfd0.writestr('dir0/dir1/dir2/foo2', f2)", "# container nested DNE path path = os.path.join(nested_temp_fd.name, 'ndir0', 'nested.zip', 'dir0', 'dne') self.false(s_filepath.exists(path))", "# container is path path = nested_temp_fd.name self.true(s_filepath.exists(path)) self.true(s_filepath.isfile(path)) # base directory that", "a directory path = os.path.join(temp_fd.name, 'dir0', 'bar') self.true(s_filepath.exists(path)) self.true(s_filepath.isfile(path)) # nested dir that", "self.false(s_filepath.isdir(path)) # DNE base path = getTestPath('nest2.tar', 'dne') self.false(s_filepath.exists(path)) self.false(s_filepath.isfile(path)) self.false(s_filepath.isdir(path)) def test_filepath_tar_open(self):", "# base directory that exists path = os.path.join(temp_fd.name, 'dir0') self.true(s_filepath.exists(path)) self.true(s_filepath.isdir(path)) # container", "fbuf = b'A' * 20 temp_fd.write(fbuf) temp_fd.flush() # file and dir that exist", "'dir2') self.true(s_filepath.isdir(path)) # DNE in a real directory path = os.path.join(temp_fd.name, 'dir0', 'dne')", "self.false(s_filepath.exists(path)) self.false(s_filepath.isfile(path)) self.false(s_filepath.isdir(path)) temp_fd.close() nested_temp_fd.close() def test_filepath_zip_open(self): temp_fd = tempfile.NamedTemporaryFile() zip_fd = zipfile.ZipFile(temp_fd.name,", "DNE path = getTestPath('nest2.tar', 'nndir0', 'nndir1', 'nest1.tar', 'ndir0', 'dne') self.false(s_filepath.exists(path)) self.false(s_filepath.isdir(path)) self.false(s_filepath.isfile(path)) #", "= os.path.join(temp_dir, 'foo2') with open(f2_path, 'wb') as fd: fd.write(f2) f3 = b'Z' *", "'eir0', 'dir2')) f0 = b'A' * 20 f0_path = os.path.join(temp_dir, 'foo0') with open(f0_path,", "* import synapse.exc as s_exc import synapse.lib.filepath as s_filepath 
import os
import shutil
import tarfile
import zipfile
import tempfile

from synapse.tests.common import *

import synapse.exc as s_exc
import synapse.lib.filepath as s_filepath

class TestFilePath(SynTest):

    def test_filepath_glob(self):
        temp_dir = tempfile.mkdtemp()
        os.mkdir(os.path.join(temp_dir, 'dir0'))
        os.mkdir(os.path.join(temp_dir, 'dir1'))
        os.mkdir(os.path.join(temp_dir, 'dir2'))
        os.mkdir(os.path.join(temp_dir, 'fooD'))
        os.makedirs(os.path.join(temp_dir, 'bazer', 'eir0', 'dir1'))
        os.makedirs(os.path.join(temp_dir, 'bazer', 'eir0', 'dir2'))

        f0 = b'A' * 20
        f0_path = os.path.join(temp_dir, 'foo0')
        with open(f0_path, 'wb') as fd:
            fd.write(f0)

        f1 = b'B' * 20
        f1_path = os.path.join(temp_dir, 'foo1')
        with open(f1_path, 'wb') as fd:
            fd.write(f1)

        f2 = b'C' * 20
        f2_path = os.path.join(temp_dir, 'foo2')
        with open(f2_path, 'wb') as fd:
            fd.write(f2)

        f3 = b'Z' * 20
        f3_path = os.path.join(temp_dir, 'junk')
        with open(f3_path, 'wb') as fd:
            fd.write(f3)

        # same files alpha/bravo
        t1 = b'a' * 20
        t_path = os.path.join(temp_dir, 'bazer', 'eir0', 'dir1', 'alpha')
        with open(t_path, 'wb') as fd:
            fd.write(t1)
        t_path = os.path.join(temp_dir, 'bazer', 'eir0', 'dir1', 'bravo')
        with open(t_path, 'wb') as fd:
            fd.write(t1)
        t_path = os.path.join(temp_dir, 'bazr')
        with open(t_path, 'wb') as fd:
            fd.write(t1)

        # files that exist
        path = os.path.join(temp_dir, 'foo*')
        self.false(s_filepath.exists(path))
        self.false(s_filepath.isfile(path))
        self.false(s_filepath.isdir(path))

        # dirs that exist
        path = os.path.join(temp_dir, 'dir*')
        self.false(s_filepath.exists(path))
        self.false(s_filepath.isfile(path))
        self.false(s_filepath.isdir(path))

        # open a dir
        fp = s_filepath.parsePath(temp_dir, 'dir0')
        self.none(fp.open())

        # multiple open regular files
        path = os.path.join(temp_dir, 'foo*')
        fd_ct = 0
        f = [fd for fd in s_filepath.openfiles(path, mode='rb', req=False)]
        for fd in f:
            buf = fd.read()
            self.eq(len(buf), 20)
            self.isin(buf, [f0, f1, f2])
            fd.close()
            fd_ct += 1
        self.eq(fd_ct, 3)

        # multiple open on dir
        path = os.path.join(temp_dir, 'dir*')

        def diropen(path):
            [f for f in s_filepath.openfiles(path, mode='rb', req=True)]
        self.raises(s_exc.NoSuchPath, diropen, path)

        path = os.path.join(temp_dir, 'dir*')

        def diropen(path):
            return [f for f in s_filepath.openfiles(path, mode='rb', req=False)]
        self.eq([], diropen(path))

        # multiple open on dne
        path = os.path.join(temp_dir, 'dne*')

        def diropen(path):
            return [f for f in s_filepath.openfiles(path, mode='rb', req=True)]
        self.raises(s_exc.NoSuchPath, diropen, path)

        ret = [a for a in s_filepath.openfiles(None)]
        self.eq([], ret)

        # multiple open zip files
        tzfd0 = open(os.path.join(temp_dir, 'baz.zip'), 'w')
        tzfd1 = tempfile.NamedTemporaryFile()
        ttfd0 = open(os.path.join(temp_dir, 'baz.tar'), 'w')

        zfd0 = zipfile.ZipFile(tzfd0.name, 'w')
        zfd0.writestr('dir0/dir1/dir2/foo0', f0)
        zfd0.writestr('dir0/dir1/dir2/foo1', f1)
        zfd0.writestr('dir0/dir1/dir2/foo2', f2)
        zfd0.writestr('dir0/dir1/dir2/junk', 'Z' * 20)
        zfd0.writestr('eir0/dir3/z1', t1)
        zfd0.writestr('eir0/dir4/z2', t1)
        zfd0.close()

        tfd0 = tarfile.TarFile(ttfd0.name, 'w')
        tfd0.add(f0_path, arcname='dir0/dir1/dir2/foo0')
        tfd0.add(f1_path, arcname='dir0/dir1/dir2/foo1')
        tfd0.add(f2_path, arcname='dir0/dir1/dir2/foo2')
        tfd0.add(f3_path, arcname='dir0/dir1/dir2/junk')
        tfd0.add(t_path, arcname='eir0/dir5/t1')
        tfd0.add(t_path, arcname='eir0/dir6/t2')
        tfd0.close()

        zfd1 = zipfile.ZipFile(tzfd1.name, 'w')
        zfd1.writestr('dir0/dir1/dir2/bar0', f0)
        zfd1.writestr('dir0/dir1/dir2/bar1', f1)
        zfd1.writestr('dir0/dir1/dir2/bar2', f2)
        zfd1.writestr('dir0/dir1/dir2/junk', 'Z' * 20)
        zfd1.write(tzfd0.name, arcname='ndir0/nested.zip')
        zfd1.write(ttfd0.name, arcname='ndir0/nested.tar')
        zfd1.close()

        path = os.path.join(tzfd1.name, 'dir0/dir1/dir2/bar*')
        count = 0
        for fd in s_filepath.openfiles(path, mode='rb'):
            buf = fd.read()
            fd.seek(0)
            self.eq(len(buf), 20)
            self.eq(buf, fd.read())
            self.isin(buf, [f0, f1, f2])
            fd.close()
            count += 1
        self.eq(count, 3)

        path = os.path.join(temp_dir, 'baz*', 'eir0', 'dir*', '*')
        count = 0
        for fd in s_filepath.openfiles(path, mode='rb'):
            buf = fd.read()
            self.eq(len(buf), 20)
            self.eq(buf, t1)
            fd.close()
            count += 1
        self.eq(count, 6)

        tzfd0.close()
        tzfd1.close()
        ttfd0.close()
        shutil.rmtree(temp_dir)

    def test_filepath_regular(self):
        temp_fd = tempfile.NamedTemporaryFile()
        temp_dir = tempfile.mkdtemp()
        fbuf = b'A' * 20
        temp_fd.write(fbuf)
        temp_fd.flush()

        # file and dir that exist
        self.true(s_filepath.exists(temp_fd.name))
        self.true(s_filepath.exists(temp_dir))
        self.true(s_filepath.exists('/'))
        self.false(s_filepath.isfile(temp_dir))
        self.false(s_filepath.isdir(temp_fd.name))

        # DNE in a real directory
        path = os.path.join(temp_dir, 'dne')
        self.false(s_filepath.exists(path))
        self.raises(s_exc.NoSuchPath, s_filepath._pathClass, path)

        # open regular file
        fd = s_filepath.openfile(temp_fd.name, mode='rb')
        self.eq(fd.read(), fbuf)
        fd.close()

        # dne path
        self.raises(s_exc.NoSuchPath, s_filepath.openfile, '%s%s' % (temp_fd.name, '_DNE'), mode='rb')
        self.raises(s_exc.NoSuchPath, s_filepath.openfile, None)
        self.raises(s_exc.NoSuchPath, s_filepath.openfile, '')
        self.none(s_filepath.openfile(None, req=False))

        # open a directory
        self.none(s_filepath.openfile('/tmp', mode='rb', req=False))
        self.none(s_filepath.openfile('/', req=False))

        temp_fd.close()
        shutil.rmtree(temp_dir)

    def test_filepath_zip(self):
        temp_fd = tempfile.NamedTemporaryFile()
        nested_temp_fd = tempfile.NamedTemporaryFile()

        zip_fd = zipfile.ZipFile(temp_fd.name, 'w')
        zip_fd.writestr('foo', 'A' * 20)
        zip_fd.writestr('dir0/bar', 'A' * 20)
        zip_fd.writestr('dir0/dir1/dir2/baz', 'C' * 20)
        zip_fd.close()

        zip_fd = zipfile.ZipFile(nested_temp_fd.name, 'w')
        zip_fd.writestr('aaa', 'A' * 20)
        zip_fd.writestr('ndir0/bbb', 'A' * 20)
        zip_fd.writestr('ndir0/ndir1/ndir2/ccc', 'C' * 20)
        zip_fd.write(temp_fd.name, arcname='ndir0/nested.zip')
        zip_fd.close()

        # container is path
        path = nested_temp_fd.name
        self.true(s_filepath.exists(path))
        self.true(s_filepath.isfile(path))

        # base directory that exists
        path = os.path.join(temp_fd.name, 'dir0')
        self.true(s_filepath.exists(path))
        self.true(s_filepath.isdir(path))

        # container nested dir that exists
        path = os.path.join(nested_temp_fd.name, 'ndir0', 'nested.zip', 'dir0')
        self.true(s_filepath.exists(path))
        self.true(s_filepath.isdir(path))

        # container nested file that exists
        path = os.path.join(nested_temp_fd.name, 'ndir0', 'nested.zip', 'dir0', 'bar')
        self.true(s_filepath.exists(path))
        self.true(s_filepath.isfile(path))

        # container nested DNE path
        path = os.path.join(nested_temp_fd.name, 'ndir0', 'nested.zip', 'dir0', 'dne')
        self.false(s_filepath.exists(path))
        self.false(s_filepath.isfile(path))
        self.false(s_filepath.isdir(path))

        # base file that exists
        path = os.path.join(temp_fd.name, 'foo')
        self.true(s_filepath.exists(path))
        self.true(s_filepath.isfile(path))

        # file that exists in a directory
        path = os.path.join(temp_fd.name, 'dir0', 'bar')
        self.true(s_filepath.exists(path))
        self.true(s_filepath.isfile(path))

        # nested dir that exists
        path = os.path.join(temp_fd.name, 'dir0', 'dir1', 'dir2')
        self.true(s_filepath.isdir(path))

        # DNE in a real directory
        path = os.path.join(temp_fd.name, 'dir0', 'dne')
        self.false(s_filepath.exists(path))
        self.false(s_filepath.isfile(path))
        self.false(s_filepath.isdir(path))

        # DNE base
        path = os.path.join(temp_fd.name, 'dne')
        self.false(s_filepath.exists(path))
        self.false(s_filepath.isfile(path))
        self.false(s_filepath.isdir(path))

        temp_fd.close()
        nested_temp_fd.close()

    def test_filepath_zip_open(self):
        temp_fd = tempfile.NamedTemporaryFile()

        zip_fd = zipfile.ZipFile(temp_fd.name, 'w')
        abuf = 'A' * 20
        bbuf = b'A' * 20
        zip_fd.writestr('dir0/foo', abuf)
        fbuf2 = 'B' * 20
        zip_fd.writestr('bar', fbuf2)
        zip_fd.close()

        # file that exists in a directory
        path = os.path.join(temp_fd.name, 'dir0', 'foo')
        self.true(s_filepath.isfile(path))

        # open zip file
        path = temp_fd.name
        fd0 = s_filepath.openfile(path, mode='rb')
        fd1 = open(path, mode='rb')
        self.eq(fd0.read(), fd1.read())
        fd0.close()
        fd1.close()

        # open inner zip file
        path = os.path.join(temp_fd.name, 'dir0', 'foo')
        fd = s_filepath.openfile(path, mode='r')
        self.eq(fd.read(), bbuf)
        fd.close()

        temp_fd.close()

    def test_filepath_tar(self):
        # container is path
        path = getTestPath('nest2.tar')
        self.true(s_filepath.exists(path))
        self.true(s_filepath.isfile(path))

        # base directory that exists
        path = getTestPath('nest2.tar', 'nndir0')
        self.true(s_filepath.exists(path))
        self.true(s_filepath.isdir(path))

        # file that exists in a tar
        path = getTestPath('nest2.tar', 'nndir0', 'nnbar')
        self.true(s_filepath.exists(path))
        self.true(s_filepath.isfile(path))

        # container nested base directory that exists
        path = getTestPath('nest2.tar', 'nndir0', 'nndir1', 'nest1.tar', 'ndir0')
        self.true(s_filepath.exists(path))
        self.true(s_filepath.isdir(path))

        # container nested file that exists
        path = getTestPath('nest2.tar', 'nndir0', 'nndir1', 'nest1.tar', 'nfoo')
        self.true(s_filepath.exists(path))
        self.true(s_filepath.isfile(path))

        # container nested file that exists
        path = getTestPath('nest2.tar', 'nndir0', 'nndir1', 'nest1.tar', 'ndir0', 'ndir1', 'nest0.zip', 'foo')
        self.true(s_filepath.exists(path))
        self.true(s_filepath.isfile(path))

        # container nested path that DNE
        path = getTestPath('nest2.tar', 'nndir0', 'nndir1', 'nest1.tar', 'ndir0', 'dne')
        self.false(s_filepath.exists(path))
        self.false(s_filepath.isdir(path))
        self.false(s_filepath.isfile(path))

        # DNE file in a real directory
        path = getTestPath('nest2.tar', 'nndir0', 'dne')
        self.false(s_filepath.exists(path))
        self.false(s_filepath.isfile(path))
        self.false(s_filepath.isdir(path))

        # DNE base
        path = getTestPath('nest2.tar', 'dne')
        self.false(s_filepath.exists(path))
        self.false(s_filepath.isfile(path))
        self.false(s_filepath.isdir(path))

    def test_filepath_tar_open(self):
        # open tar file
        path = getTestPath('nest2.tar')
        fd = s_filepath.openfile(path, mode='rb')
        fs_fd = open(getTestPath('nest2.tar'), 'rb')
        self.eq(fd.read(), fs_fd.read())
        fs_fd.close()
        fd.close()

        # open inner tar file
        path = getTestPath('nest2.tar', 'nndir0', 'nndir1', 'nest1.tar')
        fd = s_filepath.openfile(path, mode='rb')
        fs_fd = open(getTestPath('nest1.tar'), 'rb')
        self.eq(fd.read(), fs_fd.read())
        fs_fd.close()
        fd.close()

        # open inner file
        path = getTestPath('nest2.tar', 'nnfoo')
        fd = s_filepath.openfile(path, mode='rb')
        buf = b'A' * 20
        buf += b'\n'
        self.eq(fd.read(), buf)
        fd.close()
getTestPath('nest2.tar') self.true(s_filepath.exists(path)) self.true(s_filepath.isfile(path)) # base directory that exists path = getTestPath('nest2.tar', 'nndir0')", "DNE in a real directory path = os.path.join(temp_dir, 'dne') self.false(s_filepath.exists(path)) self.raises(s_exc.NoSuchPath, s_filepath._pathClass, path)", "temp_fd = tempfile.NamedTemporaryFile() nested_temp_fd = tempfile.NamedTemporaryFile() zip_fd = zipfile.ZipFile(temp_fd.name, 'w') zip_fd.writestr('foo', 'A' *", "= os.path.join(temp_dir, 'junk') with open(f3_path, 'wb') as fd: fd.write(f3) # same files alpha/bravo", "t_path = os.path.join(temp_dir, 'bazer', 'eir0', 'dir1', 'bravo') with open(t_path, 'wb') as fd: fd.write(t1)", "+= 1 self.eq(count, 6) tzfd0.close() tzfd1.close() ttfd0.close() shutil.rmtree(temp_dir) def test_filepath_regular(self): temp_fd = tempfile.NamedTemporaryFile()", "os.path.join(temp_fd.name, 'dir0', 'dne') self.false(s_filepath.exists(path)) self.false(s_filepath.isfile(path)) self.false(s_filepath.isdir(path)) # DNE base path = os.path.join(temp_fd.name, 'dne')", "= os.path.join(temp_fd.name, 'dne') self.false(s_filepath.exists(path)) self.false(s_filepath.isfile(path)) self.false(s_filepath.isdir(path)) temp_fd.close() nested_temp_fd.close() def test_filepath_zip_open(self): temp_fd = tempfile.NamedTemporaryFile()", "path = os.path.join(temp_fd.name, 'dir0', 'foo') fd = s_filepath.openfile(path, mode='r') self.eq(fd.read(), bbuf) fd.close() temp_fd.close()", "os.path.join(temp_dir, 'foo*') self.false(s_filepath.exists(path)) self.false(s_filepath.isfile(path)) self.false(s_filepath.isdir(path)) # dirs that exist path = os.path.join(temp_dir, 'dir*')", "= [a for a in s_filepath.openfiles(None)] self.eq([], ret) # multiple open zip files", "fd.close() # dne path self.raises(s_exc.NoSuchPath, s_filepath.openfile, '%s%s' % (temp_fd.name, '_DNE'), mode='rb') self.raises(s_exc.NoSuchPath, s_filepath.openfile,", "os.path.join(temp_dir, 'dir*') def diropen(path): return [f for f in s_filepath.openfiles(path, mode='rb', req=False)] self.eq([],", "= 'B' * 20 zip_fd.writestr('bar', fbuf2) zip_fd.close() # file that exists in a", "b'A' * 20 f0_path = os.path.join(temp_dir, 'foo0') with open(f0_path, 'wb') as fd: fd.write(f0)", "getTestPath('nest2.tar', 'nndir0', 'nndir1', 'nest1.tar', 'ndir0') self.true(s_filepath.exists(path)) self.true(s_filepath.isdir(path)) # container nested file that exists", "count += 1 self.eq(count, 3) path = os.path.join(temp_dir, 'baz*', 'eir0', 'dir*', '*') count", "'wb') as fd: fd.write(t1) t_path = os.path.join(temp_dir, 'bazer', 'eir0', 'dir1', 'bravo') with open(t_path,", "= fd.read() self.eq(len(buf), 20) self.eq(buf, t1) fd.close() count += 1 self.eq(count, 6) tzfd0.close()", "self.false(s_filepath.isfile(path)) # DNE file in a real directory path = getTestPath('nest2.tar', 'nndir0', 'dne')", "os.path.join(temp_dir, 'bazr') with open(t_path, 'wb') as fd: fd.write(t1) # files that exists path", "= 0 for fd in s_filepath.openfiles(path, mode='rb'): buf = fd.read() self.eq(len(buf), 20) self.eq(buf,", "s_filepath.openfile(path, mode='rb') buf = b'A' * 20 buf += b'\\n' self.eq(fd.read(), buf) fd.close()", "DNE base path = getTestPath('nest2.tar', 'dne') self.false(s_filepath.exists(path)) self.false(s_filepath.isfile(path)) self.false(s_filepath.isdir(path)) def test_filepath_tar_open(self): # open", "path path = getTestPath('nest2.tar') self.true(s_filepath.exists(path)) self.true(s_filepath.isfile(path)) # base directory that exists path =", "'ndir0', 'nested.zip', 
'dir0', 'dne') self.false(s_filepath.exists(path)) self.false(s_filepath.isfile(path)) self.false(s_filepath.isdir(path)) # base file that exists path", "def diropen(path): return [f for f in s_filepath.openfiles(path, mode='rb', req=True)] self.raises(s_exc.NoSuchPath, diropen, path)", "dir that exists path = os.path.join(nested_temp_fd.name, 'ndir0', 'nested.zip', 'dir0') self.true(s_filepath.exists(path)) self.true(s_filepath.isdir(path)) # container", "fd1.read()) fd0.close() fd1.close() # open inner zip file path = os.path.join(temp_fd.name, 'dir0', 'foo')", "with open(t_path, 'wb') as fd: fd.write(t1) # files that exists path = os.path.join(temp_dir,", "* 20 f0_path = os.path.join(temp_dir, 'foo0') with open(f0_path, 'wb') as fd: fd.write(f0) f1", "self.none(s_filepath.openfile('/tmp', mode='rb', req=False)) self.none(s_filepath.openfile('/', req=False)) temp_fd.close() shutil.rmtree(temp_dir) def test_filepath_zip(self): temp_fd = tempfile.NamedTemporaryFile() nested_temp_fd", "zip_fd.writestr('bar', fbuf2) zip_fd.close() # file that exists in a directory path = os.path.join(temp_fd.name,", "path = getTestPath('nest2.tar', 'nndir0', 'nnbar') self.true(s_filepath.exists(path)) self.true(s_filepath.isfile(path)) # container nested base directory that", "'nndir1', 'nest1.tar', 'ndir0', 'ndir1', 'nest0.zip', 'foo') self.true(s_filepath.exists(path)) self.true(s_filepath.isfile(path)) # container nested path that", "[f for f in s_filepath.openfiles(path, mode='rb', req=True)] self.raises(s_exc.NoSuchPath, diropen, path) path = os.path.join(temp_dir,", "is path path = nested_temp_fd.name self.true(s_filepath.exists(path)) self.true(s_filepath.isfile(path)) # base directory that exists path", "= getTestPath('nest2.tar', 'nndir0', 'nnbar') self.true(s_filepath.exists(path)) self.true(s_filepath.isfile(path)) # container nested base directory that exists", "exists path = os.path.join(temp_fd.name, 'dir0') self.true(s_filepath.exists(path)) self.true(s_filepath.isdir(path)) # container nested dir that exists", "'dir1', 'bravo') with open(t_path, 'wb') as fd: fd.write(t1) t_path = os.path.join(temp_dir, 'bazr') with", "req=False)) temp_fd.close() shutil.rmtree(temp_dir) def test_filepath_zip(self): temp_fd = tempfile.NamedTemporaryFile() nested_temp_fd = tempfile.NamedTemporaryFile() zip_fd =", "a tar path = getTestPath('nest2.tar', 'nndir0', 'nnbar') self.true(s_filepath.exists(path)) self.true(s_filepath.isfile(path)) # container nested base", "self.false(s_filepath.exists(path)) self.false(s_filepath.isfile(path)) self.false(s_filepath.isdir(path)) # open a dir fp = s_filepath.parsePath(temp_dir, 'dir0') self.none(fp.open()) #", "ttfd0 = open(os.path.join(temp_dir, 'baz.tar'), 'w') zfd0 = zipfile.ZipFile(tzfd0.name, 'w') zfd0.writestr('dir0/dir1/dir2/foo0', f0) zfd0.writestr('dir0/dir1/dir2/foo1', f1)", "self.false(s_filepath.isdir(temp_fd.name)) # DNE in a real directory path = os.path.join(temp_dir, 'dne') self.false(s_filepath.exists(path)) self.raises(s_exc.NoSuchPath,", "'eir0', 'dir*', '*') count = 0 for fd in s_filepath.openfiles(path, mode='rb'): buf =", "directory that exists path = getTestPath('nest2.tar', 'nndir0') self.true(s_filepath.exists(path)) self.true(s_filepath.isdir(path)) # file that exists", "test_filepath_glob(self): temp_dir = tempfile.mkdtemp() os.mkdir(os.path.join(temp_dir, 'dir0')) os.mkdir(os.path.join(temp_dir, 'dir1')) os.mkdir(os.path.join(temp_dir, 'dir2')) os.mkdir(os.path.join(temp_dir, 'fooD')) os.makedirs(os.path.join(temp_dir,", "tempfile.mkdtemp() 
os.mkdir(os.path.join(temp_dir, 'dir0')) os.mkdir(os.path.join(temp_dir, 'dir1')) os.mkdir(os.path.join(temp_dir, 'dir2')) os.mkdir(os.path.join(temp_dir, 'fooD')) os.makedirs(os.path.join(temp_dir, 'bazer', 'eir0', 'dir1'))", "'foo*') self.false(s_filepath.exists(path)) self.false(s_filepath.isfile(path)) self.false(s_filepath.isdir(path)) # dirs that exist path = os.path.join(temp_dir, 'dir*') self.false(s_filepath.exists(path))", "'eir0', 'dir1', 'alpha') with open(t_path, 'wb') as fd: fd.write(t1) t_path = os.path.join(temp_dir, 'bazer',", "= s_filepath.openfile(path, mode='rb') buf = b'A' * 20 buf += b'\\n' self.eq(fd.read(), buf)", "# open inner file path = getTestPath('nest2.tar', 'nnfoo') fd = s_filepath.openfile(path, mode='rb') buf", "os.path.join(nested_temp_fd.name, 'ndir0', 'nested.zip', 'dir0', 'bar') self.true(s_filepath.exists(path)) self.true(s_filepath.isfile(path)) # container nested DNE path path", "zfd0 = zipfile.ZipFile(tzfd0.name, 'w') zfd0.writestr('dir0/dir1/dir2/foo0', f0) zfd0.writestr('dir0/dir1/dir2/foo1', f1) zfd0.writestr('dir0/dir1/dir2/foo2', f2) zfd0.writestr('dir0/dir1/dir2/junk', 'Z' *", "for fd in s_filepath.openfiles(path, mode='rb'): buf = fd.read() self.eq(len(buf), 20) self.eq(buf, t1) fd.close()", "tfd0.close() zfd1 = zipfile.ZipFile(tzfd1.name, 'w') zfd1.writestr('dir0/dir1/dir2/bar0', f0) zfd1.writestr('dir0/dir1/dir2/bar1', f1) zfd1.writestr('dir0/dir1/dir2/bar2', f2) zfd1.writestr('dir0/dir1/dir2/junk', 'Z'", "ret = [a for a in s_filepath.openfiles(None)] self.eq([], ret) # multiple open zip", "'*') count = 0 for fd in s_filepath.openfiles(path, mode='rb'): buf = fd.read() self.eq(len(buf),", "self.none(s_filepath.openfile('/', req=False)) temp_fd.close() shutil.rmtree(temp_dir) def test_filepath_zip(self): temp_fd = tempfile.NamedTemporaryFile() nested_temp_fd = tempfile.NamedTemporaryFile() zip_fd", "= os.path.join(temp_fd.name, 'dir0', 'bar') self.true(s_filepath.exists(path)) self.true(s_filepath.isfile(path)) # nested dir that exists path =", "'dir1', 'dir2') self.true(s_filepath.isdir(path)) # DNE in a real directory path = os.path.join(temp_fd.name, 'dir0',", "self.eq(len(buf), 20) self.eq(buf, t1) fd.close() count += 1 self.eq(count, 6) tzfd0.close() tzfd1.close() ttfd0.close()", "'nest1.tar', 'nfoo') self.true(s_filepath.exists(path)) self.true(s_filepath.isfile(path)) # container nested file that exists path = getTestPath('nest2.tar',", "= zipfile.ZipFile(tzfd0.name, 'w') zfd0.writestr('dir0/dir1/dir2/foo0', f0) zfd0.writestr('dir0/dir1/dir2/foo1', f1) zfd0.writestr('dir0/dir1/dir2/foo2', f2) zfd0.writestr('dir0/dir1/dir2/junk', 'Z' * 20)", "tzfd1 = tempfile.NamedTemporaryFile() ttfd0 = open(os.path.join(temp_dir, 'baz.tar'), 'w') zfd0 = zipfile.ZipFile(tzfd0.name, 'w') zfd0.writestr('dir0/dir1/dir2/foo0',", "path path = nested_temp_fd.name self.true(s_filepath.exists(path)) self.true(s_filepath.isfile(path)) # base directory that exists path =", "= tempfile.mkdtemp() os.mkdir(os.path.join(temp_dir, 'dir0')) os.mkdir(os.path.join(temp_dir, 'dir1')) os.mkdir(os.path.join(temp_dir, 'dir2')) os.mkdir(os.path.join(temp_dir, 'fooD')) os.makedirs(os.path.join(temp_dir, 'bazer', 'eir0',", "b'Z' * 20 f3_path = os.path.join(temp_dir, 'junk') with open(f3_path, 'wb') as fd: fd.write(f3)", "1 self.eq(fd_ct, 3) # multiple open on dir path = os.path.join(temp_dir, 'dir*') def", "6) tzfd0.close() tzfd1.close() ttfd0.close() shutil.rmtree(temp_dir) def test_filepath_regular(self): temp_fd = tempfile.NamedTemporaryFile() temp_dir = 
tempfile.mkdtemp()", "path = getTestPath('nest2.tar', 'nndir0', 'nndir1', 'nest1.tar', 'nfoo') self.true(s_filepath.exists(path)) self.true(s_filepath.isfile(path)) # container nested file", "fd = s_filepath.openfile(temp_fd.name, mode='rb') self.eq(fd.read(), fbuf) fd.close() # dne path self.raises(s_exc.NoSuchPath, s_filepath.openfile, '%s%s'", "req=True)] self.raises(s_exc.NoSuchPath, diropen, path) path = os.path.join(temp_dir, 'dir*') def diropen(path): return [f for", "s_filepath._pathClass, path) # open regular file fd = s_filepath.openfile(temp_fd.name, mode='rb') self.eq(fd.read(), fbuf) fd.close()", "self.true(s_filepath.exists(path)) self.true(s_filepath.isfile(path)) # container nested DNE path path = os.path.join(nested_temp_fd.name, 'ndir0', 'nested.zip', 'dir0',", "fbuf2) zip_fd.close() # file that exists in a directory path = os.path.join(temp_fd.name, 'dir0',", "zip_fd.writestr('ndir0/ndir1/ndir2/ccc', 'C' * 20) zip_fd.write(temp_fd.name, arcname='ndir0/nested.zip') zip_fd.close() # container is path path =", "multiple open regular files path = os.path.join(temp_dir, 'foo*') fd_ct = 0 f =", "nested file that exists path = getTestPath('nest2.tar', 'nndir0', 'nndir1', 'nest1.tar', 'nfoo') self.true(s_filepath.exists(path)) self.true(s_filepath.isfile(path))", "tzfd0 = open(os.path.join(temp_dir, 'baz.zip'), 'w') tzfd1 = tempfile.NamedTemporaryFile() ttfd0 = open(os.path.join(temp_dir, 'baz.tar'), 'w')", "exists path = getTestPath('nest2.tar', 'nndir0', 'nndir1', 'nest1.tar', 'nfoo') self.true(s_filepath.exists(path)) self.true(s_filepath.isfile(path)) # container nested", "in a real directory path = getTestPath('nest2.tar', 'nndir0', 'dne') self.false(s_filepath.exists(path)) self.false(s_filepath.isfile(path)) self.false(s_filepath.isdir(path)) #", "= s_filepath.openfile(temp_fd.name, mode='rb') self.eq(fd.read(), fbuf) fd.close() # dne path self.raises(s_exc.NoSuchPath, s_filepath.openfile, '%s%s' %", "'bazr') with open(t_path, 'wb') as fd: fd.write(t1) # files that exists path =", "os.path.join(temp_dir, 'dir*') self.false(s_filepath.exists(path)) self.false(s_filepath.isfile(path)) self.false(s_filepath.isdir(path)) # open a dir fp = s_filepath.parsePath(temp_dir, 'dir0')", "fbuf2 = 'B' * 20 zip_fd.writestr('bar', fbuf2) zip_fd.close() # file that exists in", "= open(path, mode='rb') self.eq(fd0.read(), fd1.read()) fd0.close() fd1.close() # open inner zip file path", "self.eq(fd0.read(), fd1.read()) fd0.close() fd1.close() # open inner zip file path = os.path.join(temp_fd.name, 'dir0',", "# open inner zip file path = os.path.join(temp_fd.name, 'dir0', 'foo') fd = s_filepath.openfile(path,", "self.true(s_filepath.isdir(path)) # DNE in a real directory path = os.path.join(temp_fd.name, 'dir0', 'dne') self.false(s_filepath.exists(path))", "zip_fd.writestr('aaa', 'A' * 20) zip_fd.writestr('ndir0/bbb', 'A' * 20) zip_fd.writestr('ndir0/ndir1/ndir2/ccc', 'C' * 20) zip_fd.write(temp_fd.name,", "'A' * 20) zip_fd.writestr('ndir0/ndir1/ndir2/ccc', 'C' * 20) zip_fd.write(temp_fd.name, arcname='ndir0/nested.zip') zip_fd.close() # container is", "'nndir0') self.true(s_filepath.exists(path)) self.true(s_filepath.isdir(path)) # file that exists in a tar path = getTestPath('nest2.tar',", "zip files tzfd0 = open(os.path.join(temp_dir, 'baz.zip'), 'w') tzfd1 = tempfile.NamedTemporaryFile() ttfd0 = open(os.path.join(temp_dir,", "1 self.eq(count, 3) path = os.path.join(temp_dir, 'baz*', 'eir0', 'dir*', '*') count = 0", "[a for a in s_filepath.openfiles(None)] self.eq([], ret) # multiple open 
zip files tzfd0", "fd.close() # open inner file path = getTestPath('nest2.tar', 'nnfoo') fd = s_filepath.openfile(path, mode='rb')", "arcname='dir0/dir1/dir2/foo1') tfd0.add(f2_path, arcname='dir0/dir1/dir2/foo2') tfd0.add(f3_path, arcname='dir0/dir1/dir2/junk') tfd0.add(t_path, arcname='eir0/dir5/t1') tfd0.add(t_path, arcname='eir0/dir6/t2') tfd0.close() zfd1 = zipfile.ZipFile(tzfd1.name,", "f2]) fd.close() count += 1 self.eq(count, 3) path = os.path.join(temp_dir, 'baz*', 'eir0', 'dir*',", "= fd.read() fd.seek(0) self.eq(len(buf), 20) self.eq(buf, fd.read()) self.isin(buf, [f0, f1, f2]) fd.close() count", "s_filepath class TestFilePath(SynTest): def test_filepath_glob(self): temp_dir = tempfile.mkdtemp() os.mkdir(os.path.join(temp_dir, 'dir0')) os.mkdir(os.path.join(temp_dir, 'dir1')) os.mkdir(os.path.join(temp_dir,", "zip_fd.close() # container is path path = nested_temp_fd.name self.true(s_filepath.exists(path)) self.true(s_filepath.isfile(path)) # base directory", "'eir0', 'dir1')) os.makedirs(os.path.join(temp_dir, 'bazer', 'eir0', 'dir2')) f0 = b'A' * 20 f0_path =", "* 20 t_path = os.path.join(temp_dir, 'bazer', 'eir0', 'dir1', 'alpha') with open(t_path, 'wb') as", "real directory path = os.path.join(temp_dir, 'dne') self.false(s_filepath.exists(path)) self.raises(s_exc.NoSuchPath, s_filepath._pathClass, path) # open regular", "'A' * 20) zip_fd.writestr('dir0/bar', 'A' * 20) zip_fd.writestr('dir0/dir1/dir2/baz', 'C' * 20) zip_fd.close() zip_fd", "os.path.join(temp_fd.name, 'dir0') self.true(s_filepath.exists(path)) self.true(s_filepath.isdir(path)) # container nested dir that exists path = os.path.join(nested_temp_fd.name,", "io import shutil import tarfile import zipfile import tempfile from synapse.tests.common import *", "= open(getTestPath('nest2.tar'), 'rb') self.eq(fd.read(), fs_fd.read()) fs_fd.close() fd.close() # open inner tar file path", "= temp_fd.name fd0 = s_filepath.openfile(path, mode='rb') fd1 = open(path, mode='rb') self.eq(fd0.read(), fd1.read()) fd0.close()", "'junk') with open(f3_path, 'wb') as fd: fd.write(f3) # same files alpha/bravo t1 =", "path = os.path.join(nested_temp_fd.name, 'ndir0', 'nested.zip', 'dir0') self.true(s_filepath.exists(path)) self.true(s_filepath.isdir(path)) # container nested file that", "def test_filepath_regular(self): temp_fd = tempfile.NamedTemporaryFile() temp_dir = tempfile.mkdtemp() fbuf = b'A' * 20", "open(os.path.join(temp_dir, 'baz.zip'), 'w') tzfd1 = tempfile.NamedTemporaryFile() ttfd0 = open(os.path.join(temp_dir, 'baz.tar'), 'w') zfd0 =", "arcname='ndir0/nested.tar') zfd1.close() path = os.path.join(tzfd1.name, 'dir0/dir1/dir2/bar*') count = 0 for fd in s_filepath.openfiles(path,", "temp_dir = tempfile.mkdtemp() fbuf = b'A' * 20 temp_fd.write(fbuf) temp_fd.flush() # file and", "f2_path = os.path.join(temp_dir, 'foo2') with open(f2_path, 'wb') as fd: fd.write(f2) f3 = b'Z'", "zfd1.write(tzfd0.name, arcname='ndir0/nested.zip') zfd1.write(ttfd0.name, arcname='ndir0/nested.tar') zfd1.close() path = os.path.join(tzfd1.name, 'dir0/dir1/dir2/bar*') count = 0 for", "temp_fd.flush() # file and dir that exist self.true(s_filepath.exists(temp_fd.name)) self.true(s_filepath.exists(temp_dir)) self.true(s_filepath.exists('/')) self.false(s_filepath.isfile(temp_dir)) self.false(s_filepath.isdir(temp_fd.name)) #", "path self.raises(s_exc.NoSuchPath, s_filepath.openfile, '%s%s' % (temp_fd.name, '_DNE'), mode='rb') self.raises(s_exc.NoSuchPath, s_filepath.openfile, None) self.raises(s_exc.NoSuchPath, s_filepath.openfile,", "# container is 
path path = getTestPath('nest2.tar') self.true(s_filepath.exists(path)) self.true(s_filepath.isfile(path)) # base directory that", "file that exists in a directory path = os.path.join(temp_fd.name, 'dir0', 'foo') self.true(s_filepath.isfile(path)) #", "= getTestPath('nest2.tar', 'nndir0', 'nndir1', 'nest1.tar', 'ndir0', 'ndir1', 'nest0.zip', 'foo') self.true(s_filepath.exists(path)) self.true(s_filepath.isfile(path)) # container", "path = getTestPath('nest2.tar', 'nndir0', 'nndir1', 'nest1.tar', 'ndir0', 'dne') self.false(s_filepath.exists(path)) self.false(s_filepath.isdir(path)) self.false(s_filepath.isfile(path)) # DNE", "'%s%s' % (temp_fd.name, '_DNE'), mode='rb') self.raises(s_exc.NoSuchPath, s_filepath.openfile, None) self.raises(s_exc.NoSuchPath, s_filepath.openfile, '') self.none(s_filepath.openfile(None, req=False))", "'dir0', 'dir1', 'dir2') self.true(s_filepath.isdir(path)) # DNE in a real directory path = os.path.join(temp_fd.name,", "zip_fd.writestr('dir0/dir1/dir2/baz', 'C' * 20) zip_fd.close() zip_fd = zipfile.ZipFile(nested_temp_fd.name, 'w') zip_fd.writestr('aaa', 'A' * 20)", "mode='rb', req=True)] self.raises(s_exc.NoSuchPath, diropen, path) path = os.path.join(temp_dir, 'dir*') def diropen(path): return [f", "nested dir that exists path = os.path.join(temp_fd.name, 'dir0', 'dir1', 'dir2') self.true(s_filepath.isdir(path)) # DNE", "= os.path.join(temp_dir, 'foo*') fd_ct = 0 f = [fd for fd in s_filepath.openfiles(path,", "def test_filepath_tar(self): # container is path path = getTestPath('nest2.tar') self.true(s_filepath.exists(path)) self.true(s_filepath.isfile(path)) # base", "test_filepath_tar_open(self): # open tar file path = getTestPath('nest2.tar') fd = s_filepath.openfile(path, mode='rb') fs_fd", "'nested.zip', 'dir0') self.true(s_filepath.exists(path)) self.true(s_filepath.isdir(path)) # container nested file that exists path = os.path.join(nested_temp_fd.name,", "zfd1.writestr('dir0/dir1/dir2/junk', 'Z' * 20) zfd1.write(tzfd0.name, arcname='ndir0/nested.zip') zfd1.write(ttfd0.name, arcname='ndir0/nested.tar') zfd1.close() path = os.path.join(tzfd1.name, 'dir0/dir1/dir2/bar*')", "self.false(s_filepath.isfile(path)) self.false(s_filepath.isdir(path)) # DNE base path = getTestPath('nest2.tar', 'dne') self.false(s_filepath.exists(path)) self.false(s_filepath.isfile(path)) self.false(s_filepath.isdir(path)) def", "# container nested dir that exists path = os.path.join(nested_temp_fd.name, 'ndir0', 'nested.zip', 'dir0') self.true(s_filepath.exists(path))", "import tempfile from synapse.tests.common import * import synapse.exc as s_exc import synapse.lib.filepath as", "s_filepath.openfiles(path, mode='rb'): buf = fd.read() fd.seek(0) self.eq(len(buf), 20) self.eq(buf, fd.read()) self.isin(buf, [f0, f1,", "files alpha/bravo t1 = b'a' * 20 t_path = os.path.join(temp_dir, 'bazer', 'eir0', 'dir1',", "nested dir that exists path = os.path.join(nested_temp_fd.name, 'ndir0', 'nested.zip', 'dir0') self.true(s_filepath.exists(path)) self.true(s_filepath.isdir(path)) #", "os.path.join(temp_dir, 'bazer', 'eir0', 'dir1', 'alpha') with open(t_path, 'wb') as fd: fd.write(t1) t_path =", "mode='rb'): buf = fd.read() self.eq(len(buf), 20) self.eq(buf, t1) fd.close() count += 1 self.eq(count,", "a directory self.none(s_filepath.openfile('/tmp', mode='rb', req=False)) self.none(s_filepath.openfile('/', req=False)) temp_fd.close() shutil.rmtree(temp_dir) def test_filepath_zip(self): temp_fd =", "'A' * 20) zip_fd.writestr('ndir0/bbb', 'A' * 20) zip_fd.writestr('ndir0/ndir1/ndir2/ccc', 'C' 
* 20) zip_fd.write(temp_fd.name, arcname='ndir0/nested.zip')", "= b'C' * 20 f2_path = os.path.join(temp_dir, 'foo2') with open(f2_path, 'wb') as fd:", "= os.path.join(temp_dir, 'bazr') with open(t_path, 'wb') as fd: fd.write(t1) # files that exists", "'dir0', 'bar') self.true(s_filepath.exists(path)) self.true(s_filepath.isfile(path)) # container nested DNE path path = os.path.join(nested_temp_fd.name, 'ndir0',", "self.false(s_filepath.isdir(path)) temp_fd.close() nested_temp_fd.close() def test_filepath_zip_open(self): temp_fd = tempfile.NamedTemporaryFile() zip_fd = zipfile.ZipFile(temp_fd.name, 'w') abuf", "file path = getTestPath('nest2.tar', 'nndir0', 'nndir1', 'nest1.tar') fd = s_filepath.openfile(path, mode='rb') fs_fd =", "path = os.path.join(temp_dir, 'dir*') self.false(s_filepath.exists(path)) self.false(s_filepath.isfile(path)) self.false(s_filepath.isdir(path)) # open a dir fp =", "= open(os.path.join(temp_dir, 'baz.zip'), 'w') tzfd1 = tempfile.NamedTemporaryFile() ttfd0 = open(os.path.join(temp_dir, 'baz.tar'), 'w') zfd0", "'fooD')) os.makedirs(os.path.join(temp_dir, 'bazer', 'eir0', 'dir1')) os.makedirs(os.path.join(temp_dir, 'bazer', 'eir0', 'dir2')) f0 = b'A' *", "fd_ct += 1 self.eq(fd_ct, 3) # multiple open on dir path = os.path.join(temp_dir,", "zip_fd.writestr('dir0/bar', 'A' * 20) zip_fd.writestr('dir0/dir1/dir2/baz', 'C' * 20) zip_fd.close() zip_fd = zipfile.ZipFile(nested_temp_fd.name, 'w')", "# open a directory self.none(s_filepath.openfile('/tmp', mode='rb', req=False)) self.none(s_filepath.openfile('/', req=False)) temp_fd.close() shutil.rmtree(temp_dir) def test_filepath_zip(self):", "zip_fd.close() zip_fd = zipfile.ZipFile(nested_temp_fd.name, 'w') zip_fd.writestr('aaa', 'A' * 20) zip_fd.writestr('ndir0/bbb', 'A' * 20)", "s_filepath.openfile(path, mode='r') self.eq(fd.read(), bbuf) fd.close() temp_fd.close() temp_fd.close() def test_filepath_tar(self): # container is path", "# container nested file that exists path = os.path.join(nested_temp_fd.name, 'ndir0', 'nested.zip', 'dir0', 'bar')", "'ndir0', 'ndir1', 'nest0.zip', 'foo') self.true(s_filepath.exists(path)) self.true(s_filepath.isfile(path)) # container nested path that DNE path", "file that exists in a directory path = os.path.join(temp_fd.name, 'dir0', 'bar') self.true(s_filepath.exists(path)) self.true(s_filepath.isfile(path))", "test_filepath_zip_open(self): temp_fd = tempfile.NamedTemporaryFile() zip_fd = zipfile.ZipFile(temp_fd.name, 'w') abuf = 'A' * 20", "bbuf) fd.close() temp_fd.close() temp_fd.close() def test_filepath_tar(self): # container is path path = getTestPath('nest2.tar')", "self.true(s_filepath.exists(path)) self.true(s_filepath.isfile(path)) # container nested path that DNE path = getTestPath('nest2.tar', 'nndir0', 'nndir1',", "os.makedirs(os.path.join(temp_dir, 'bazer', 'eir0', 'dir1')) os.makedirs(os.path.join(temp_dir, 'bazer', 'eir0', 'dir2')) f0 = b'A' * 20", "tfd0.add(f2_path, arcname='dir0/dir1/dir2/foo2') tfd0.add(f3_path, arcname='dir0/dir1/dir2/junk') tfd0.add(t_path, arcname='eir0/dir5/t1') tfd0.add(t_path, arcname='eir0/dir6/t2') tfd0.close() zfd1 = zipfile.ZipFile(tzfd1.name, 'w')", "f0 = b'A' * 20 f0_path = os.path.join(temp_dir, 'foo0') with open(f0_path, 'wb') as", "path = nested_temp_fd.name self.true(s_filepath.exists(path)) self.true(s_filepath.isfile(path)) # base directory that exists path = os.path.join(temp_fd.name,", "open(t_path, 'wb') as fd: fd.write(t1) t_path = os.path.join(temp_dir, 'bazer', 'eir0', 'dir1', 'bravo') with", "mode='rb', req=True)] 
self.raises(s_exc.NoSuchPath, diropen, path) ret = [a for a in s_filepath.openfiles(None)] self.eq([],", "'nndir1', 'nest1.tar', 'nfoo') self.true(s_filepath.exists(path)) self.true(s_filepath.isfile(path)) # container nested file that exists path =", "= tempfile.NamedTemporaryFile() zip_fd = zipfile.ZipFile(temp_fd.name, 'w') abuf = 'A' * 20 bbuf =", "path) # open regular file fd = s_filepath.openfile(temp_fd.name, mode='rb') self.eq(fd.read(), fbuf) fd.close() #", "getTestPath('nest2.tar', 'nndir0', 'nnbar') self.true(s_filepath.exists(path)) self.true(s_filepath.isfile(path)) # container nested base directory that exists path", "'nnbar') self.true(s_filepath.exists(path)) self.true(s_filepath.isfile(path)) # container nested base directory that exists path = getTestPath('nest2.tar',", "= getTestPath('nest2.tar', 'dne') self.false(s_filepath.exists(path)) self.false(s_filepath.isfile(path)) self.false(s_filepath.isdir(path)) def test_filepath_tar_open(self): # open tar file path", "file path = os.path.join(temp_fd.name, 'dir0', 'foo') fd = s_filepath.openfile(path, mode='r') self.eq(fd.read(), bbuf) fd.close()", "'dne') self.false(s_filepath.exists(path)) self.false(s_filepath.isfile(path)) self.false(s_filepath.isdir(path)) # DNE base path = os.path.join(temp_fd.name, 'dne') self.false(s_filepath.exists(path)) self.false(s_filepath.isfile(path))", "in s_filepath.openfiles(None)] self.eq([], ret) # multiple open zip files tzfd0 = open(os.path.join(temp_dir, 'baz.zip'),", "ret) # multiple open zip files tzfd0 = open(os.path.join(temp_dir, 'baz.zip'), 'w') tzfd1 =", "zip_fd = zipfile.ZipFile(nested_temp_fd.name, 'w') zip_fd.writestr('aaa', 'A' * 20) zip_fd.writestr('ndir0/bbb', 'A' * 20) zip_fd.writestr('ndir0/ndir1/ndir2/ccc',", "f in s_filepath.openfiles(path, mode='rb', req=True)] self.raises(s_exc.NoSuchPath, diropen, path) ret = [a for a", "# file that exists in a directory path = os.path.join(temp_fd.name, 'dir0', 'bar') self.true(s_filepath.exists(path))", "# container nested file that exists path = getTestPath('nest2.tar', 'nndir0', 'nndir1', 'nest1.tar', 'ndir0',", "'dne') self.false(s_filepath.exists(path)) self.false(s_filepath.isfile(path)) self.false(s_filepath.isdir(path)) def test_filepath_tar_open(self): # open tar file path = getTestPath('nest2.tar')", "inner tar file path = getTestPath('nest2.tar', 'nndir0', 'nndir1', 'nest1.tar') fd = s_filepath.openfile(path, mode='rb')", "with open(t_path, 'wb') as fd: fd.write(t1) t_path = os.path.join(temp_dir, 'bazr') with open(t_path, 'wb')", "= tempfile.NamedTemporaryFile() zip_fd = zipfile.ZipFile(temp_fd.name, 'w') zip_fd.writestr('foo', 'A' * 20) zip_fd.writestr('dir0/bar', 'A' *", "self.false(s_filepath.exists(path)) self.false(s_filepath.isfile(path)) self.false(s_filepath.isdir(path)) # base file that exists path = os.path.join(temp_fd.name, 'foo') self.true(s_filepath.exists(path))", "self.true(s_filepath.isfile(path)) # nested dir that exists path = os.path.join(temp_fd.name, 'dir0', 'dir1', 'dir2') self.true(s_filepath.isdir(path))", "that exist self.true(s_filepath.exists(temp_fd.name)) self.true(s_filepath.exists(temp_dir)) self.true(s_filepath.exists('/')) self.false(s_filepath.isfile(temp_dir)) self.false(s_filepath.isdir(temp_fd.name)) # DNE in a real directory", "fd.read() self.eq(len(buf), 20) self.isin(buf, [f0, f1, f2]) fd.close() fd_ct += 1 self.eq(fd_ct, 3)", "fp = s_filepath.parsePath(temp_dir, 'dir0') self.none(fp.open()) # multiple open regular files path = os.path.join(temp_dir,", "mode='r') 
self.eq(fd.read(), bbuf) fd.close() temp_fd.close() temp_fd.close() def test_filepath_tar(self): # container is path path", "= b'A' * 20 zip_fd.writestr('dir0/foo', abuf) fbuf2 = 'B' * 20 zip_fd.writestr('bar', fbuf2)", "os.path.join(temp_dir, 'bazer', 'eir0', 'dir1', 'bravo') with open(t_path, 'wb') as fd: fd.write(t1) t_path =", "container nested dir that exists path = os.path.join(nested_temp_fd.name, 'ndir0', 'nested.zip', 'dir0') self.true(s_filepath.exists(path)) self.true(s_filepath.isdir(path))", "base file that exists path = os.path.join(temp_fd.name, 'foo') self.true(s_filepath.exists(path)) self.true(s_filepath.isfile(path)) # file that", "that exists path = os.path.join(temp_fd.name, 'dir0', 'dir1', 'dir2') self.true(s_filepath.isdir(path)) # DNE in a", "import shutil import tarfile import zipfile import tempfile from synapse.tests.common import * import", "tfd0 = tarfile.TarFile(ttfd0.name, 'w') tfd0.add(f0_path, arcname='dir0/dir1/dir2/foo0') tfd0.add(f1_path, arcname='dir0/dir1/dir2/foo1') tfd0.add(f2_path, arcname='dir0/dir1/dir2/foo2') tfd0.add(f3_path, arcname='dir0/dir1/dir2/junk') tfd0.add(t_path,", "exists in a directory path = os.path.join(temp_fd.name, 'dir0', 'foo') self.true(s_filepath.isfile(path)) # open zip", "temp_fd.close() temp_fd.close() def test_filepath_tar(self): # container is path path = getTestPath('nest2.tar') self.true(s_filepath.exists(path)) self.true(s_filepath.isfile(path))", "'baz.zip'), 'w') tzfd1 = tempfile.NamedTemporaryFile() ttfd0 = open(os.path.join(temp_dir, 'baz.tar'), 'w') zfd0 = zipfile.ZipFile(tzfd0.name,", "'dne') self.false(s_filepath.exists(path)) self.false(s_filepath.isfile(path)) self.false(s_filepath.isdir(path)) # base file that exists path = os.path.join(temp_fd.name, 'foo')", "that exists path = os.path.join(nested_temp_fd.name, 'ndir0', 'nested.zip', 'dir0') self.true(s_filepath.exists(path)) self.true(s_filepath.isdir(path)) # container nested", "* 20 bbuf = b'A' * 20 zip_fd.writestr('dir0/foo', abuf) fbuf2 = 'B' *", "tar file path = getTestPath('nest2.tar') fd = s_filepath.openfile(path, mode='rb') fs_fd = open(getTestPath('nest2.tar'), 'rb')", "for fd in s_filepath.openfiles(path, mode='rb', req=False)] for fd in f: buf = fd.read()", "t1) zfd0.writestr('eir0/dir4/z2', t1) zfd0.close() tfd0 = tarfile.TarFile(ttfd0.name, 'w') tfd0.add(f0_path, arcname='dir0/dir1/dir2/foo0') tfd0.add(f1_path, arcname='dir0/dir1/dir2/foo1') tfd0.add(f2_path,", "synapse.tests.common import * import synapse.exc as s_exc import synapse.lib.filepath as s_filepath class TestFilePath(SynTest):", "'dir0', 'foo') fd = s_filepath.openfile(path, mode='r') self.eq(fd.read(), bbuf) fd.close() temp_fd.close() temp_fd.close() def test_filepath_tar(self):", "# DNE in a real directory path = os.path.join(temp_dir, 'dne') self.false(s_filepath.exists(path)) self.raises(s_exc.NoSuchPath, s_filepath._pathClass,", "dir fp = s_filepath.parsePath(temp_dir, 'dir0') self.none(fp.open()) # multiple open regular files path =", "# dne path self.raises(s_exc.NoSuchPath, s_filepath.openfile, '%s%s' % (temp_fd.name, '_DNE'), mode='rb') self.raises(s_exc.NoSuchPath, s_filepath.openfile, None)", "'foo') self.true(s_filepath.exists(path)) self.true(s_filepath.isfile(path)) # container nested path that DNE path = getTestPath('nest2.tar', 'nndir0',", "'dir2')) f0 = b'A' * 20 f0_path = os.path.join(temp_dir, 'foo0') with open(f0_path, 'wb')", "self.raises(s_exc.NoSuchPath, diropen, path) ret = [a for a in s_filepath.openfiles(None)] self.eq([], ret) #", "self.eq(count, 6) 
tzfd0.close() tzfd1.close() ttfd0.close() shutil.rmtree(temp_dir) def test_filepath_regular(self): temp_fd = tempfile.NamedTemporaryFile() temp_dir =", "tempfile.NamedTemporaryFile() ttfd0 = open(os.path.join(temp_dir, 'baz.tar'), 'w') zfd0 = zipfile.ZipFile(tzfd0.name, 'w') zfd0.writestr('dir0/dir1/dir2/foo0', f0) zfd0.writestr('dir0/dir1/dir2/foo1',", "[f0, f1, f2]) fd.close() count += 1 self.eq(count, 3) path = os.path.join(temp_dir, 'baz*',", "in a tar path = getTestPath('nest2.tar', 'nndir0', 'nnbar') self.true(s_filepath.exists(path)) self.true(s_filepath.isfile(path)) # container nested", "for fd in s_filepath.openfiles(path, mode='rb'): buf = fd.read() fd.seek(0) self.eq(len(buf), 20) self.eq(buf, fd.read())", "fd.write(t1) t_path = os.path.join(temp_dir, 'bazer', 'eir0', 'dir1', 'bravo') with open(t_path, 'wb') as fd:", "'dne') self.false(s_filepath.exists(path)) self.raises(s_exc.NoSuchPath, s_filepath._pathClass, path) # open regular file fd = s_filepath.openfile(temp_fd.name, mode='rb')", "fd.close() # open inner tar file path = getTestPath('nest2.tar', 'nndir0', 'nndir1', 'nest1.tar') fd", "= zipfile.ZipFile(temp_fd.name, 'w') zip_fd.writestr('foo', 'A' * 20) zip_fd.writestr('dir0/bar', 'A' * 20) zip_fd.writestr('dir0/dir1/dir2/baz', 'C'", "that exist path = os.path.join(temp_dir, 'dir*') self.false(s_filepath.exists(path)) self.false(s_filepath.isfile(path)) self.false(s_filepath.isdir(path)) # open a dir", "nested DNE path path = os.path.join(nested_temp_fd.name, 'ndir0', 'nested.zip', 'dir0', 'dne') self.false(s_filepath.exists(path)) self.false(s_filepath.isfile(path)) self.false(s_filepath.isdir(path))", "s_filepath.openfiles(path, mode='rb', req=False)] for fd in f: buf = fd.read() self.eq(len(buf), 20) self.isin(buf,", "os.mkdir(os.path.join(temp_dir, 'dir1')) os.mkdir(os.path.join(temp_dir, 'dir2')) os.mkdir(os.path.join(temp_dir, 'fooD')) os.makedirs(os.path.join(temp_dir, 'bazer', 'eir0', 'dir1')) os.makedirs(os.path.join(temp_dir, 'bazer', 'eir0',", "that exists in a directory path = os.path.join(temp_fd.name, 'dir0', 'foo') self.true(s_filepath.isfile(path)) # open", "container nested DNE path path = os.path.join(nested_temp_fd.name, 'ndir0', 'nested.zip', 'dir0', 'dne') self.false(s_filepath.exists(path)) self.false(s_filepath.isfile(path))", "20 f2_path = os.path.join(temp_dir, 'foo2') with open(f2_path, 'wb') as fd: fd.write(f2) f3 =", "count += 1 self.eq(count, 6) tzfd0.close() tzfd1.close() ttfd0.close() shutil.rmtree(temp_dir) def test_filepath_regular(self): temp_fd =", "in a directory path = os.path.join(temp_fd.name, 'dir0', 'bar') self.true(s_filepath.exists(path)) self.true(s_filepath.isfile(path)) # nested dir", "os.mkdir(os.path.join(temp_dir, 'fooD')) os.makedirs(os.path.join(temp_dir, 'bazer', 'eir0', 'dir1')) os.makedirs(os.path.join(temp_dir, 'bazer', 'eir0', 'dir2')) f0 = b'A'", "bbuf = b'A' * 20 zip_fd.writestr('dir0/foo', abuf) fbuf2 = 'B' * 20 zip_fd.writestr('bar',", "'dir2')) os.mkdir(os.path.join(temp_dir, 'fooD')) os.makedirs(os.path.join(temp_dir, 'bazer', 'eir0', 'dir1')) os.makedirs(os.path.join(temp_dir, 'bazer', 'eir0', 'dir2')) f0 =", "= getTestPath('nest2.tar', 'nndir0', 'nndir1', 'nest1.tar', 'ndir0', 'dne') self.false(s_filepath.exists(path)) self.false(s_filepath.isdir(path)) self.false(s_filepath.isfile(path)) # DNE file", "open inner file path = getTestPath('nest2.tar', 'nnfoo') fd = s_filepath.openfile(path, mode='rb') buf =", "= zipfile.ZipFile(temp_fd.name, 'w') abuf = 'A' * 20 bbuf = b'A' * 20", "file path = 
getTestPath('nest2.tar') fd = s_filepath.openfile(path, mode='rb') fs_fd = open(getTestPath('nest2.tar'), 'rb') self.eq(fd.read(),", "zipfile.ZipFile(tzfd1.name, 'w') zfd1.writestr('dir0/dir1/dir2/bar0', f0) zfd1.writestr('dir0/dir1/dir2/bar1', f1) zfd1.writestr('dir0/dir1/dir2/bar2', f2) zfd1.writestr('dir0/dir1/dir2/junk', 'Z' * 20) zfd1.write(tzfd0.name,", "fd: fd.write(f1) f2 = b'C' * 20 f2_path = os.path.join(temp_dir, 'foo2') with open(f2_path,", "'dne') self.false(s_filepath.exists(path)) self.false(s_filepath.isdir(path)) self.false(s_filepath.isfile(path)) # DNE file in a real directory path =", "os.path.join(temp_dir, 'foo2') with open(f2_path, 'wb') as fd: fd.write(f2) f3 = b'Z' * 20", "mode='rb', req=False)] for fd in f: buf = fd.read() self.eq(len(buf), 20) self.isin(buf, [f0,", "'Z' * 20) zfd0.writestr('eir0/dir3/z1', t1) zfd0.writestr('eir0/dir4/z2', t1) zfd0.close() tfd0 = tarfile.TarFile(ttfd0.name, 'w') tfd0.add(f0_path,", "files that exists path = os.path.join(temp_dir, 'foo*') self.false(s_filepath.exists(path)) self.false(s_filepath.isfile(path)) self.false(s_filepath.isdir(path)) # dirs that", "arcname='eir0/dir6/t2') tfd0.close() zfd1 = zipfile.ZipFile(tzfd1.name, 'w') zfd1.writestr('dir0/dir1/dir2/bar0', f0) zfd1.writestr('dir0/dir1/dir2/bar1', f1) zfd1.writestr('dir0/dir1/dir2/bar2', f2) zfd1.writestr('dir0/dir1/dir2/junk',", "'nested.zip', 'dir0', 'dne') self.false(s_filepath.exists(path)) self.false(s_filepath.isfile(path)) self.false(s_filepath.isdir(path)) # base file that exists path =", "f2) zfd0.writestr('dir0/dir1/dir2/junk', 'Z' * 20) zfd0.writestr('eir0/dir3/z1', t1) zfd0.writestr('eir0/dir4/z2', t1) zfd0.close() tfd0 = tarfile.TarFile(ttfd0.name,", "from synapse.tests.common import * import synapse.exc as s_exc import synapse.lib.filepath as s_filepath class", "s_exc import synapse.lib.filepath as s_filepath class TestFilePath(SynTest): def test_filepath_glob(self): temp_dir = tempfile.mkdtemp() os.mkdir(os.path.join(temp_dir,", "DNE in a real directory path = os.path.join(temp_fd.name, 'dir0', 'dne') self.false(s_filepath.exists(path)) self.false(s_filepath.isfile(path)) self.false(s_filepath.isdir(path))", "arcname='eir0/dir5/t1') tfd0.add(t_path, arcname='eir0/dir6/t2') tfd0.close() zfd1 = zipfile.ZipFile(tzfd1.name, 'w') zfd1.writestr('dir0/dir1/dir2/bar0', f0) zfd1.writestr('dir0/dir1/dir2/bar1', f1) zfd1.writestr('dir0/dir1/dir2/bar2',", "that exists in a directory path = os.path.join(temp_fd.name, 'dir0', 'bar') self.true(s_filepath.exists(path)) self.true(s_filepath.isfile(path)) #", "nested base directory that exists path = getTestPath('nest2.tar', 'nndir0', 'nndir1', 'nest1.tar', 'ndir0') self.true(s_filepath.exists(path))", "fs_fd.read()) fs_fd.close() fd.close() # open inner tar file path = getTestPath('nest2.tar', 'nndir0', 'nndir1',", "'B' * 20 zip_fd.writestr('bar', fbuf2) zip_fd.close() # file that exists in a directory", "self.true(s_filepath.isfile(path)) # base directory that exists path = getTestPath('nest2.tar', 'nndir0') self.true(s_filepath.exists(path)) self.true(s_filepath.isdir(path)) #", "20) zfd0.writestr('eir0/dir3/z1', t1) zfd0.writestr('eir0/dir4/z2', t1) zfd0.close() tfd0 = tarfile.TarFile(ttfd0.name, 'w') tfd0.add(f0_path, arcname='dir0/dir1/dir2/foo0') tfd0.add(f1_path,", "'foo') fd = s_filepath.openfile(path, mode='r') self.eq(fd.read(), bbuf) fd.close() temp_fd.close() temp_fd.close() def test_filepath_tar(self): #", "dne path = os.path.join(temp_dir, 'dne*') def diropen(path): return [f for f in 
s_filepath.openfiles(path,", "'nndir0', 'nndir1', 'nest1.tar', 'nfoo') self.true(s_filepath.exists(path)) self.true(s_filepath.isfile(path)) # container nested file that exists path", "'nndir1', 'nest1.tar', 'ndir0', 'dne') self.false(s_filepath.exists(path)) self.false(s_filepath.isdir(path)) self.false(s_filepath.isfile(path)) # DNE file in a real", "import tarfile import zipfile import tempfile from synapse.tests.common import * import synapse.exc as", "files tzfd0 = open(os.path.join(temp_dir, 'baz.zip'), 'w') tzfd1 = tempfile.NamedTemporaryFile() ttfd0 = open(os.path.join(temp_dir, 'baz.tar'),", "abuf) fbuf2 = 'B' * 20 zip_fd.writestr('bar', fbuf2) zip_fd.close() # file that exists", "# open a dir fp = s_filepath.parsePath(temp_dir, 'dir0') self.none(fp.open()) # multiple open regular", "open regular files path = os.path.join(temp_dir, 'foo*') fd_ct = 0 f = [fd", "'dir0/dir1/dir2/bar*') count = 0 for fd in s_filepath.openfiles(path, mode='rb'): buf = fd.read() fd.seek(0)", "self.false(s_filepath.isdir(path)) # open a dir fp = s_filepath.parsePath(temp_dir, 'dir0') self.none(fp.open()) # multiple open", "temp_fd = tempfile.NamedTemporaryFile() temp_dir = tempfile.mkdtemp() fbuf = b'A' * 20 temp_fd.write(fbuf) temp_fd.flush()", "container nested file that exists path = os.path.join(nested_temp_fd.name, 'ndir0', 'nested.zip', 'dir0', 'bar') self.true(s_filepath.exists(path))", "'dir*', '*') count = 0 for fd in s_filepath.openfiles(path, mode='rb'): buf = fd.read()", "mode='rb', req=False)) self.none(s_filepath.openfile('/', req=False)) temp_fd.close() shutil.rmtree(temp_dir) def test_filepath_zip(self): temp_fd = tempfile.NamedTemporaryFile() nested_temp_fd =", "* 20) zip_fd.write(temp_fd.name, arcname='ndir0/nested.zip') zip_fd.close() # container is path path = nested_temp_fd.name self.true(s_filepath.exists(path))", "# container nested path that DNE path = getTestPath('nest2.tar', 'nndir0', 'nndir1', 'nest1.tar', 'ndir0',", "= os.path.join(temp_dir, 'dne') self.false(s_filepath.exists(path)) self.raises(s_exc.NoSuchPath, s_filepath._pathClass, path) # open regular file fd =", "f1_path = os.path.join(temp_dir, 'foo1') with open(f1_path, 'wb') as fd: fd.write(f1) f2 = b'C'", "return [f for f in s_filepath.openfiles(path, mode='rb', req=True)] self.raises(s_exc.NoSuchPath, diropen, path) ret =", "fd0 = s_filepath.openfile(path, mode='rb') fd1 = open(path, mode='rb') self.eq(fd0.read(), fd1.read()) fd0.close() fd1.close() #", "fd.write(f2) f3 = b'Z' * 20 f3_path = os.path.join(temp_dir, 'junk') with open(f3_path, 'wb')", "t1 = b'a' * 20 t_path = os.path.join(temp_dir, 'bazer', 'eir0', 'dir1', 'alpha') with", "req=False)) self.none(s_filepath.openfile('/', req=False)) temp_fd.close() shutil.rmtree(temp_dir) def test_filepath_zip(self): temp_fd = tempfile.NamedTemporaryFile() nested_temp_fd = tempfile.NamedTemporaryFile()", "as fd: fd.write(f3) # same files alpha/bravo t1 = b'a' * 20 t_path", "def diropen(path): [f for f in s_filepath.openfiles(path, mode='rb', req=True)] self.raises(s_exc.NoSuchPath, diropen, path) path", "= open(os.path.join(temp_dir, 'baz.tar'), 'w') zfd0 = zipfile.ZipFile(tzfd0.name, 'w') zfd0.writestr('dir0/dir1/dir2/foo0', f0) zfd0.writestr('dir0/dir1/dir2/foo1', f1) zfd0.writestr('dir0/dir1/dir2/foo2',", "zfd0.writestr('dir0/dir1/dir2/junk', 'Z' * 20) zfd0.writestr('eir0/dir3/z1', t1) zfd0.writestr('eir0/dir4/z2', t1) zfd0.close() tfd0 = tarfile.TarFile(ttfd0.name, 'w')", "fs_fd.read()) fs_fd.close() fd.close() # open inner file path = getTestPath('nest2.tar', 
'nnfoo') fd =", "that exists path = getTestPath('nest2.tar', 'nndir0', 'nndir1', 'nest1.tar', 'ndir0') self.true(s_filepath.exists(path)) self.true(s_filepath.isdir(path)) # container", "20 zip_fd.writestr('bar', fbuf2) zip_fd.close() # file that exists in a directory path =", "self.raises(s_exc.NoSuchPath, s_filepath.openfile, None) self.raises(s_exc.NoSuchPath, s_filepath.openfile, '') self.none(s_filepath.openfile(None, req=False)) # open a directory self.none(s_filepath.openfile('/tmp',", "= s_filepath.openfile(path, mode='rb') fs_fd = open(getTestPath('nest1.tar'), 'rb') self.eq(fd.read(), fs_fd.read()) fs_fd.close() fd.close() # open", "os.path.join(temp_fd.name, 'foo') self.true(s_filepath.exists(path)) self.true(s_filepath.isfile(path)) # file that exists in a directory path =", "os.path.join(temp_fd.name, 'dir0', 'bar') self.true(s_filepath.exists(path)) self.true(s_filepath.isfile(path)) # nested dir that exists path = os.path.join(temp_fd.name,", "* 20) zip_fd.writestr('ndir0/ndir1/ndir2/ccc', 'C' * 20) zip_fd.write(temp_fd.name, arcname='ndir0/nested.zip') zip_fd.close() # container is path", "= os.path.join(nested_temp_fd.name, 'ndir0', 'nested.zip', 'dir0', 'dne') self.false(s_filepath.exists(path)) self.false(s_filepath.isfile(path)) self.false(s_filepath.isdir(path)) # base file that", "= s_filepath.openfile(path, mode='r') self.eq(fd.read(), bbuf) fd.close() temp_fd.close() temp_fd.close() def test_filepath_tar(self): # container is", "'nndir0', 'nndir1', 'nest1.tar') fd = s_filepath.openfile(path, mode='rb') fs_fd = open(getTestPath('nest1.tar'), 'rb') self.eq(fd.read(), fs_fd.read())", "'nest1.tar', 'ndir0') self.true(s_filepath.exists(path)) self.true(s_filepath.isdir(path)) # container nested file that exists path = getTestPath('nest2.tar',", "s_filepath.openfile, '') self.none(s_filepath.openfile(None, req=False)) # open a directory self.none(s_filepath.openfile('/tmp', mode='rb', req=False)) self.none(s_filepath.openfile('/', req=False))", "= getTestPath('nest2.tar', 'nnfoo') fd = s_filepath.openfile(path, mode='rb') buf = b'A' * 20 buf", "'baz*', 'eir0', 'dir*', '*') count = 0 for fd in s_filepath.openfiles(path, mode='rb'): buf", "f = [fd for fd in s_filepath.openfiles(path, mode='rb', req=False)] for fd in f:", "self.eq(fd.read(), fs_fd.read()) fs_fd.close() fd.close() # open inner file path = getTestPath('nest2.tar', 'nnfoo') fd", "'wb') as fd: fd.write(f2) f3 = b'Z' * 20 f3_path = os.path.join(temp_dir, 'junk')", "path = os.path.join(tzfd1.name, 'dir0/dir1/dir2/bar*') count = 0 for fd in s_filepath.openfiles(path, mode='rb'): buf", "path = os.path.join(temp_dir, 'dne') self.false(s_filepath.exists(path)) self.raises(s_exc.NoSuchPath, s_filepath._pathClass, path) # open regular file fd", "with open(f1_path, 'wb') as fd: fd.write(f1) f2 = b'C' * 20 f2_path =", "# files that exists path = os.path.join(temp_dir, 'foo*') self.false(s_filepath.exists(path)) self.false(s_filepath.isfile(path)) self.false(s_filepath.isdir(path)) # dirs", "s_filepath.openfiles(path, mode='rb', req=False)] self.eq([], diropen(path)) # multiple open on dne path = os.path.join(temp_dir,", "'wb') as fd: fd.write(f3) # same files alpha/bravo t1 = b'a' * 20", "getTestPath('nest2.tar') fd = s_filepath.openfile(path, mode='rb') fs_fd = open(getTestPath('nest2.tar'), 'rb') self.eq(fd.read(), fs_fd.read()) fs_fd.close() fd.close()", "fd.write(f3) # same files alpha/bravo t1 = b'a' * 20 t_path = os.path.join(temp_dir,", "tempfile.NamedTemporaryFile() zip_fd = zipfile.ZipFile(temp_fd.name, 'w') abuf 
= 'A' * 20 bbuf = b'A'", "= os.path.join(temp_dir, 'bazer', 'eir0', 'dir1', 'bravo') with open(t_path, 'wb') as fd: fd.write(t1) t_path", "self.eq(buf, t1) fd.close() count += 1 self.eq(count, 6) tzfd0.close() tzfd1.close() ttfd0.close() shutil.rmtree(temp_dir) def", "= zipfile.ZipFile(nested_temp_fd.name, 'w') zip_fd.writestr('aaa', 'A' * 20) zip_fd.writestr('ndir0/bbb', 'A' * 20) zip_fd.writestr('ndir0/ndir1/ndir2/ccc', 'C'", "'w') abuf = 'A' * 20 bbuf = b'A' * 20 zip_fd.writestr('dir0/foo', abuf)", "base directory that exists path = getTestPath('nest2.tar', 'nndir0') self.true(s_filepath.exists(path)) self.true(s_filepath.isdir(path)) # file that", "s_filepath.openfile(path, mode='rb') fd1 = open(path, mode='rb') self.eq(fd0.read(), fd1.read()) fd0.close() fd1.close() # open inner", "= os.path.join(temp_fd.name, 'dir0', 'foo') self.true(s_filepath.isfile(path)) # open zip file path = temp_fd.name fd0", "* 20 f3_path = os.path.join(temp_dir, 'junk') with open(f3_path, 'wb') as fd: fd.write(f3) #", "arcname='dir0/dir1/dir2/foo2') tfd0.add(f3_path, arcname='dir0/dir1/dir2/junk') tfd0.add(t_path, arcname='eir0/dir5/t1') tfd0.add(t_path, arcname='eir0/dir6/t2') tfd0.close() zfd1 = zipfile.ZipFile(tzfd1.name, 'w') zfd1.writestr('dir0/dir1/dir2/bar0',", "<reponame>larrycameron80/synapse import io import shutil import tarfile import zipfile import tempfile from synapse.tests.common", "self.raises(s_exc.NoSuchPath, s_filepath.openfile, '%s%s' % (temp_fd.name, '_DNE'), mode='rb') self.raises(s_exc.NoSuchPath, s_filepath.openfile, None) self.raises(s_exc.NoSuchPath, s_filepath.openfile, '')", "f1 = b'B' * 20 f1_path = os.path.join(temp_dir, 'foo1') with open(f1_path, 'wb') as", "'foo') self.true(s_filepath.exists(path)) self.true(s_filepath.isfile(path)) # file that exists in a directory path = os.path.join(temp_fd.name,", "zipfile.ZipFile(nested_temp_fd.name, 'w') zip_fd.writestr('aaa', 'A' * 20) zip_fd.writestr('ndir0/bbb', 'A' * 20) zip_fd.writestr('ndir0/ndir1/ndir2/ccc', 'C' *", "= os.path.join(temp_fd.name, 'dir0', 'dir1', 'dir2') self.true(s_filepath.isdir(path)) # DNE in a real directory path", "= 0 f = [fd for fd in s_filepath.openfiles(path, mode='rb', req=False)] for fd", "is path path = getTestPath('nest2.tar') self.true(s_filepath.exists(path)) self.true(s_filepath.isfile(path)) # base directory that exists path", "self.true(s_filepath.isdir(path)) # file that exists in a tar path = getTestPath('nest2.tar', 'nndir0', 'nnbar')", "self.true(s_filepath.exists(path)) self.true(s_filepath.isdir(path)) # container nested dir that exists path = os.path.join(nested_temp_fd.name, 'ndir0', 'nested.zip',", "fd: fd.write(t1) t_path = os.path.join(temp_dir, 'bazer', 'eir0', 'dir1', 'bravo') with open(t_path, 'wb') as", "real directory path = os.path.join(temp_fd.name, 'dir0', 'dne') self.false(s_filepath.exists(path)) self.false(s_filepath.isfile(path)) self.false(s_filepath.isdir(path)) # DNE base", "* 20) zip_fd.writestr('dir0/bar', 'A' * 20) zip_fd.writestr('dir0/dir1/dir2/baz', 'C' * 20) zip_fd.close() zip_fd =", "f2]) fd.close() fd_ct += 1 self.eq(fd_ct, 3) # multiple open on dir path", "self.true(s_filepath.isfile(path)) # file that exists in a directory path = os.path.join(temp_fd.name, 'dir0', 'bar')", "'nest1.tar', 'ndir0', 'ndir1', 'nest0.zip', 'foo') self.true(s_filepath.exists(path)) self.true(s_filepath.isfile(path)) # container nested path that DNE", "20) zip_fd.write(temp_fd.name, arcname='ndir0/nested.zip') zip_fd.close() # container is path path = nested_temp_fd.name 
self.true(s_filepath.exists(path)) self.true(s_filepath.isfile(path))", "self.true(s_filepath.exists(path)) self.true(s_filepath.isdir(path)) # container nested file that exists path = os.path.join(nested_temp_fd.name, 'ndir0', 'nested.zip',", "mode='rb', req=False)] self.eq([], diropen(path)) # multiple open on dne path = os.path.join(temp_dir, 'dne*')", "that exists path = getTestPath('nest2.tar', 'nndir0', 'nndir1', 'nest1.tar', 'nfoo') self.true(s_filepath.exists(path)) self.true(s_filepath.isfile(path)) # container", "'ndir0') self.true(s_filepath.exists(path)) self.true(s_filepath.isdir(path)) # container nested file that exists path = getTestPath('nest2.tar', 'nndir0',", "tfd0.add(f3_path, arcname='dir0/dir1/dir2/junk') tfd0.add(t_path, arcname='eir0/dir5/t1') tfd0.add(t_path, arcname='eir0/dir6/t2') tfd0.close() zfd1 = zipfile.ZipFile(tzfd1.name, 'w') zfd1.writestr('dir0/dir1/dir2/bar0', f0)", "= [fd for fd in s_filepath.openfiles(path, mode='rb', req=False)] for fd in f: buf", "s_filepath.openfile, None) self.raises(s_exc.NoSuchPath, s_filepath.openfile, '') self.none(s_filepath.openfile(None, req=False)) # open a directory self.none(s_filepath.openfile('/tmp', mode='rb',", "fd1 = open(path, mode='rb') self.eq(fd0.read(), fd1.read()) fd0.close() fd1.close() # open inner zip file", "file that exists path = os.path.join(temp_fd.name, 'foo') self.true(s_filepath.exists(path)) self.true(s_filepath.isfile(path)) # file that exists", "as s_exc import synapse.lib.filepath as s_filepath class TestFilePath(SynTest): def test_filepath_glob(self): temp_dir = tempfile.mkdtemp()", "b'B' * 20 f1_path = os.path.join(temp_dir, 'foo1') with open(f1_path, 'wb') as fd: fd.write(f1)", "open a directory self.none(s_filepath.openfile('/tmp', mode='rb', req=False)) self.none(s_filepath.openfile('/', req=False)) temp_fd.close() shutil.rmtree(temp_dir) def test_filepath_zip(self): temp_fd" ]
[ "Profile exclude = [] fields = ['profilepic','bio','contact'] class EditUser(forms.ModelForm): class Meta: model =", "NewProject(forms.ModelForm): class Meta: model = Project exclude = ['likes', 'profile',] class EditProfile(forms.ModelForm): class", "class EditUser(forms.ModelForm): class Meta: model = User exclude = [] fields = ['first_name','last_name',", "['profilepic','bio','contact'] class EditUser(forms.ModelForm): class Meta: model = User exclude = [] fields =", "class NewProject(forms.ModelForm): class Meta: model = Project exclude = ['likes', 'profile',] class EditProfile(forms.ModelForm):", "EditProfile(forms.ModelForm): class Meta: model = Profile exclude = [] fields = ['profilepic','bio','contact'] class", "= Profile exclude = [] fields = ['profilepic','bio','contact'] class EditUser(forms.ModelForm): class Meta: model", "exclude = [] fields = ['profilepic','bio','contact'] class EditUser(forms.ModelForm): class Meta: model = User", "class Meta: model = Profile exclude = [] fields = ['profilepic','bio','contact'] class EditUser(forms.ModelForm):", "class Meta: model = Project exclude = ['likes', 'profile',] class EditProfile(forms.ModelForm): class Meta:", "model = Profile exclude = [] fields = ['profilepic','bio','contact'] class EditUser(forms.ModelForm): class Meta:", "= User exclude = [] fields = ['first_name','last_name', 'email'] class NewVote(forms.ModelForm): class Meta:", "Meta: model = Project exclude = ['likes', 'profile',] class EditProfile(forms.ModelForm): class Meta: model", "[] fields = ['profilepic','bio','contact'] class EditUser(forms.ModelForm): class Meta: model = User exclude =", "[] fields = ['first_name','last_name', 'email'] class NewVote(forms.ModelForm): class Meta: model = Vote exclude", "model = User exclude = [] fields = ['first_name','last_name', 'email'] class NewVote(forms.ModelForm): class", "['likes', 'profile',] class EditProfile(forms.ModelForm): class Meta: model = Profile exclude = [] fields", ".models import * class NewProject(forms.ModelForm): class Meta: model = Project exclude = ['likes',", "import * class NewProject(forms.ModelForm): class Meta: model = Project exclude = ['likes', 'profile',]", "model = Project exclude = ['likes', 'profile',] class EditProfile(forms.ModelForm): class Meta: model =", "Meta: model = Profile exclude = [] fields = ['profilepic','bio','contact'] class EditUser(forms.ModelForm): class", "= ['first_name','last_name', 'email'] class NewVote(forms.ModelForm): class Meta: model = Vote exclude = ['voter','project']", "exclude = ['likes', 'profile',] class EditProfile(forms.ModelForm): class Meta: model = Profile exclude =", "exclude = [] fields = ['first_name','last_name', 'email'] class NewVote(forms.ModelForm): class Meta: model =", "= [] fields = ['first_name','last_name', 'email'] class NewVote(forms.ModelForm): class Meta: model = Vote", "from django import forms from .models import * class NewProject(forms.ModelForm): class Meta: model", "= ['likes', 'profile',] class EditProfile(forms.ModelForm): class Meta: model = Profile exclude = []", "= Project exclude = ['likes', 'profile',] class EditProfile(forms.ModelForm): class Meta: model = Profile", "* class NewProject(forms.ModelForm): class Meta: model = Project exclude = ['likes', 'profile',] class", "'profile',] class EditProfile(forms.ModelForm): class Meta: model = Profile exclude = [] fields =", "= [] fields = ['profilepic','bio','contact'] class EditUser(forms.ModelForm): class Meta: model = User exclude", "= ['profilepic','bio','contact'] class 
EditUser(forms.ModelForm): class Meta: model = User exclude = [] fields", "EditUser(forms.ModelForm): class Meta: model = User exclude = [] fields = ['first_name','last_name', 'email']", "fields = ['profilepic','bio','contact'] class EditUser(forms.ModelForm): class Meta: model = User exclude = []", "class EditProfile(forms.ModelForm): class Meta: model = Profile exclude = [] fields = ['profilepic','bio','contact']", "import forms from .models import * class NewProject(forms.ModelForm): class Meta: model = Project", "Meta: model = User exclude = [] fields = ['first_name','last_name', 'email'] class NewVote(forms.ModelForm):", "from .models import * class NewProject(forms.ModelForm): class Meta: model = Project exclude =", "User exclude = [] fields = ['first_name','last_name', 'email'] class NewVote(forms.ModelForm): class Meta: model", "class Meta: model = User exclude = [] fields = ['first_name','last_name', 'email'] class", "django import forms from .models import * class NewProject(forms.ModelForm): class Meta: model =", "fields = ['first_name','last_name', 'email'] class NewVote(forms.ModelForm): class Meta: model = Vote exclude =", "Project exclude = ['likes', 'profile',] class EditProfile(forms.ModelForm): class Meta: model = Profile exclude", "forms from .models import * class NewProject(forms.ModelForm): class Meta: model = Project exclude" ]
[]
[ "= src_schema else: next_path = path + (key,) try: target[key] = schema_merge(target_schema, src_schema,", "sub_schema = properties[0] return sub_schema # pylint: disable=C0301 def traverse_raw_schema(schema: dict, path: tuple):", "Sequence, current_path_parts: list, current_path: list ) -> Tuple[List[object], List[tuple]]: \"\"\" When unpacking a", "Any) -> OrderedSet: return ( OrderedSet(value) if isinstance(value, (list, OrderedSet)) else OrderedSet([value]) )", "Tuple[List[object], List[tuple]]: \"\"\"Traverse the paths for all sequence members in the document according", ">>> schema_merge(a, b, ('foo',)) {'type': OrderedSet(['a', 'b'])} >>> a, b = {'$ref': 'a'},", "for part in path_parts: if isinstance(document, Sequence): part = int(part) parent = document", "reference under the remote base name inside the remote section. >>> rewrite_ref((BASE, \"foo\",", "that the reference is valid. :raises ValueError, LookupError: the reference is invalid for", "'a'}, {'type': OrderedSet(['b', 'c'])} >>> schema_merge(a, b, ('foo',)) {'type': OrderedSet(['a', 'b', 'c'])} >>>", "incorrect path provided\\n%s\\n%s\", path, e) return {} def traverse_path_for_sequence_members( document: dict, path_parts: Sequence,", ">>> traverse({\"foo\": {\"bar\": [42]}}, (\"foo\", \"bar\")) ([42], ('foo', 'bar'), {'bar': [42]}) >>> traverse({\"foo\":", "= schema.get(\"definitions\", {}) sub_properties = properties last_step = ( len(path) - 1 )", "k, v in item.items()} if isinstance(item, list): item = [item_hash(i) for i in", "of range \"\"\" if path is None: path = [] if not path_parts:", "path): # noqa: C901 # pylint: disable=R0912 \"\"\"Merges the src schema into the", ">>> rewrite_ref((BASE, \"foo\", \"bar\")) '#/foo/bar' >>> rewrite_ref((BASE,)) '#' >>> rewrite_ref((\"remote\", \"foo\", \"bar\")) '#/remote/remote/foo/bar'", "json.dumps(item, sort_keys=True).encode() dhash.update(encoded) return dhash.hexdigest() def to_set(value: Any) -> OrderedSet: return ( OrderedSet(value)", "[0] should be a single $ref in subschema on the top level #", "is used directly -> # means that we need to check definition #", "be safe (always single value) # bc sub_schema is always per paranet property", "{'type': OrderedSet(['a', 'b'])}, {'$ref': 'c'} >>> schema_merge(a, b, ('foo',)) {'type': OrderedSet(['a', 'b', 'c'])}", ") # pylint: disable=W0707 raise ConstraintError(msg, path, key, target_schema, src_schema) target[key] = src_schema", "| src_set try: # check if there are conflicting $ref and type #", ") -> Tuple[List[object], List[tuple]]: \"\"\" When unpacking a sequence, we need to include", "document: The remaining document is traversed using the remaining path parts. The list", ">>> traverse_path_for_sequence_members({\"foo\": {\"bar\": [42, 43, 44]}}, (\"foo\", \"bar\")) ([[42, 43, 44]], [('foo', 'bar')])", "{}, ()) {} >>> schema_merge({'foo': 'a'}, {}, ()) {'foo': 'a'} >>> schema_merge({}, {'foo':", "{'baz': 2, 'bin': 2}], [('foo', 'bar', 0), ('foo', 'bar', 1)]) >>> traverse_path_for_sequence_members({\"foo\": {\"bar\":", "encoded into a pointer. 
If the reference is outside of the base document,", "{\"baz\": 2, \"bin\": 2}]}}, (\"foo\", \"bar\", \"*\", \"baz\")) ([1, 2], [('foo', 'bar', 0,", "could only # happen on combiners because method merges two json # objects", "'bar', 0), ('foo', 'bar', 1)]) >>> traverse_path_for_sequence_members({\"foo\": {\"bar\": [{\"baz\": 1, \"bin\": 1}, {\"baz\":", "pylint: disable=consider-using-enumerate new_document = new_documents[i] newer_documents, newer_paths = traverse_path_for_sequence_members( new_document, current_path_parts, new_paths[i] )", "'b'}, ()) {'foo': 'b'} >>> schema_merge({'required': 'a'}, {'required': 'b'}, ()) {'required': ['a', 'b']}", "logging from collections.abc import Mapping, Sequence from typing import Any, List, Tuple from", "{\"baz\": 2, \"bin\": 2}]}}, (\"foo\", \"bar\", \"*\")) ([{'baz': 1, 'bin': 1}, {'baz': 2,", "List[tuple]]: \"\"\" When unpacking a sequence, we need to include multiple paths and", "Conflicting $ref could only # happen on combiners because method merges two json", "except KeyError as e: LOG.debug(\"Malformed Schema or incorrect path provided\\n%s\\n%s\", path, e) return", "def traverse_raw_schema(schema: dict, path: tuple): \"\"\"Traverse the raw json schema resolving $ref :raises", "Mapping, Sequence from typing import Any, List, Tuple from nested_lookup import nested_lookup from", "is invalid for this document >>> traverse_path_for_sequence_members({\"foo\": {\"bar\": [42, 43, 44]}}, tuple()) ([{'foo':", "current_path_parts, current_path.copy() )[0] new_documents.extend(new_document) for i in range(len(new_documents)): # pylint: disable=consider-using-enumerate new_document =", "each sequence member: Append the traversed paths w/ the sequence index, and get", "KeyError as e: LOG.debug(\"Malformed Schema or incorrect path provided\\n%s\\n%s\", path, e) return {}", "OrderedSet(['a', 'b', 'c'])} >>> a, b = {'type': OrderedSet(['a', 'b'])}, {'$ref': 'c'} >>>", "# combining multiple $ref and types src_set = to_set(src_schema) try: target[TYPE] = to_set(", "key in ( REF, TYPE, ): # $ref and type are treated similarly", "base document, a unique pointer inside the base document is made by namespacing", "existing properties except KeyError: target[key] = src_schema else: next_path = path + (key,)", "\" \"for '{}': found '{}' and '{}'\" ) # pylint: disable=W0707 raise ConstraintError(msg,", "{\"bar\": [{\"baz\": 1, \"bin\": 1}, {\"baz\": 2, \"bin\": 2}]}}, (\"foo\", \"bar\", \"*\")) ([{'baz':", "{'$ref': 'a'}, {'type': ['b', 'c']} >>> schema_merge(a, b, ('foo',)) {'type': OrderedSet(['a', 'b', 'c'])}", "tries to override \"type\" or \"$ref\" >>> schema_merge({}, {}, ()) {} >>> schema_merge({'foo':", "fragment_encode(path) message = message.format(*args, path=self.path) super().__init__(message) class BaseRefPlaceholder: \"\"\"A sentinel object representing a", "last_step, ) return sub_properties except KeyError as e: LOG.debug(\"Malformed Schema or incorrect path", "representing a reference inside the base document. 
BASE = BaseRefPlaceholder() def rewrite_ref(ref): \"\"\"Rewrite", "bool = False): # resolve $ref ref = nested_lookup(REF, sub_schema) # should be", "ConstraintError: the schema tries to override \"type\" or \"$ref\" >>> traverse_raw_schema({\"properties\": {\"bar\": [42]}},", "{'$ref': 'a'}}, {'Foo': {'type': 'b'}} >>> schema_merge(a, b, ('foo',)) {'Foo': {'type': OrderedSet(['a', 'b'])}}", "import OrderedSet from .pointer import fragment_decode, fragment_encode LOG = logging.getLogger(__name__) NON_MERGABLE_KEYS = (\"uniqueItems\",", "message, path, *args): self.path = fragment_encode(path) message = message.format(*args, path=self.path) super().__init__(message) class BaseRefPlaceholder:", "last): ... TypeError: Schema must be a dictionary \"\"\" if not isinstance(schema, Mapping):", "\"bar\", \"0\")) (42, ('foo', 'bar', 0), [42]) >>> traverse({}, [\"foo\"]) Traceback (most recent", "dhash.hexdigest() def to_set(value: Any) -> OrderedSet: return ( OrderedSet(value) if isinstance(value, (list, OrderedSet))", "return fragment_encode(parts) def traverse(document, path_parts): \"\"\"Traverse the document according to the reference. Since", "as the index. The next path part is added to the traversed path", "as lib # implicitly converts strings to sets target[TYPE] |= src_set except (TypeError,", "path_parts, path) def _handle_non_sequence_for_traverse( current_document: dict, current_path_parts: list, current_path: list ) -> Tuple[List[object],", "43, 44]}}, (\"foo\", \"bar\", \"*\")) ([42, 43, 44], [('foo', 'bar', 0), ('foo', 'bar',", "([42, 43, 44], [('foo', 'bar', 0), ('foo', 'bar', 1), ('foo', 'bar', 2)]) >>>", ">>> schema_merge({'foo': 'a'}, {}, ()) {'foo': 'a'} >>> schema_merge({}, {'foo': 'a'}, ()) {'foo':", "{'foo': 'a'}, ()) {'foo': 'a'} >>> schema_merge({'foo': 'a'}, {'foo': 'b'}, ()) {'foo': 'b'}", "returned. \"\"\" documents = [] resolved_paths = [] new_documents = [] new_paths =", "# implicitly converts strings to sets target[TYPE] |= src_set except (TypeError, KeyError): target_set", "out of range \"\"\" if path is None: path = [] if not", "to the reference. Since the document is presumed to be the reference's base,", "The next path part is the first part in the list of path", "call last): ... 
KeyError: 'foo' >>> traverse_path_for_sequence_members([], [\"foo\"]) Traceback (most recent call last):", "traversal continues by recursively calling `traverse_path_for_sequence_members` \"\"\" part_to_handle = current_path_parts.pop(0) current_document = current_document[part_to_handle]", "'a'}, {'type': 'b'}, ()) # doctest: +NORMALIZE_WHITESPACE {'type': OrderedSet(['a', 'b'])} >>> schema_merge({'type': 'string'},", "sub_schema) if properties: sub_schema = properties[0] return sub_schema # pylint: disable=C0301 def traverse_raw_schema(schema:", "[1] >>> traverse_raw_schema({}, (\"foo\")) {} >>> traverse_raw_schema([], [\"foo\"]) Traceback (most recent call last):", "\"\"\"Traverse the raw json schema resolving $ref :raises TypeError: either schema is not", "return {} def traverse_path_for_sequence_members( document: dict, path_parts: Sequence, path: list = None )", "[('foo', 'bar', 0), ('foo', 'bar', 1), ('foo', 'bar', 2)]) >>> traverse_path_for_sequence_members({\"foo\": {\"bar\": [{\"baz\":", "_handle_unpack_sequence_for_traverse( current_document, current_path_parts, current_path ) # otherwise, sequence part should be a valid", "\"bar\")) ([42], ('foo', 'bar'), {'bar': [42]}) >>> traverse({\"foo\": {\"bar\": [42]}}, (\"foo\", \"bar\", \"0\"))", "OrderedSet(['a', 'b'])}, {'$ref': 'c'} >>> schema_merge(a, b, ('foo',)) {'type': OrderedSet(['a', 'b', 'c'])} >>>", "discarded. There is no validation that the reference is valid. Differing from traverse,", "isinstance(document, Sequence): part = int(part) parent = document document = document[part] path.append(part) return", "# assumption -> input is only json comparable type (dict/list/scalar) \"\"\"MD5 hash for", "key in (TYPE, REF): # combining multiple $ref and types src_set = to_set(src_schema)", "pass def item_hash( item, ): # assumption -> input is only json comparable", "Mapping): raise TypeError(\"Schema must be a dictionary\") try: properties = schema[\"properties\"] definitions =", "traverse([], [0]) Traceback (most recent call last): ... IndexError: list index out of", "\"\"\" Handling a non-sequence member for `traverse_path_for_sequence_members` is like the loop block in", "msg = ( \"Object at path '{path}' declared multiple values \" \"for '{}':", "b = {'$ref': 'a'}, {'$ref': 'b'} >>> schema_merge(a, b, ('foo',)) {'type': OrderedSet(['a', 'b'])}", "traverse({\"foo\": {\"bar\": [42]}}, (\"foo\", \"bar\")) ([42], ('foo', 'bar'), {'bar': [42]}) >>> traverse({\"foo\": {\"bar\":", "the remote base name inside the remote section. 
>>> rewrite_ref((BASE, \"foo\", \"bar\")) '#/foo/bar'", "a, b = {'$ref': 'a'}, {'foo': 'b'} >>> schema_merge(a, b, ('foo',)) {'$ref': 'a',", "in item].sort() encoded = json.dumps(item, sort_keys=True).encode() dhash.update(encoded) return dhash.hexdigest() def to_set(value: Any) ->", "dict, path: tuple): \"\"\"Traverse the raw json schema resolving $ref :raises TypeError: either", ">>> traverse({\"foo\": {\"bar\": [42]}}, tuple()) ({'foo': {'bar': [42]}}, (), None) >>> traverse({\"foo\": {\"bar\":", "path=self.path) super().__init__(message) class BaseRefPlaceholder: \"\"\"A sentinel object representing a reference inside the base", "path = [] for part in path_parts: if isinstance(document, Sequence): part = int(part)", "invalid for this document >>> traverse_path_for_sequence_members({\"foo\": {\"bar\": [42, 43, 44]}}, tuple()) ([{'foo': {'bar':", "'b'} >>> schema_merge(a, b, ('foo',)) {'type': OrderedSet(['a', 'b'])} >>> a, b = {'$ref':", "TypeError: either schema is not of type dict :raises ConstraintError: the schema tries", "= None path = [] for part in path_parts: if isinstance(document, Sequence): part", "the sequence index, and get the new document. The new document is obtained", "appended to the list of new documents. For each new document: The remaining", "nested_lookup from ordered_set import OrderedSet from .pointer import fragment_decode, fragment_encode LOG = logging.getLogger(__name__)", "(most recent call last): ... ValueError: invalid literal for int() with base 10:", "json schema resolving $ref :raises TypeError: either schema is not of type dict", "the reference is invalid for this document >>> traverse_path_for_sequence_members({\"foo\": {\"bar\": [42, 43, 44]}},", "Schema must be a dictionary \"\"\" if not isinstance(schema, Mapping): raise TypeError(\"Schema must", "each sequence member. For each sequence member: Append the traversed paths w/ the", "of steps to prevent deeper traversal than requested for step in path: sub_properties", "('foo',)) {'type': OrderedSet(['a', 'b', 'c'])} >>> a, b = {'Foo': {'$ref': 'a'}}, {'Foo':", "Mapping) and isinstance(src, Mapping)): raise TypeError(\"Both schemas must be dictionaries\") for key, src_schema", "Traceback (most recent call last): ... KeyError: 'foo' >>> traverse([], [\"foo\"]) Traceback (most", "sequence_part == UNPACK_SEQUENCE_IDENTIFIER: return _handle_unpack_sequence_for_traverse( current_document, current_path_parts, current_path ) # otherwise, sequence part", "by recursively calling `traverse_path_for_sequence_members` \"\"\" part_to_handle = current_path_parts.pop(0) current_document = current_document[part_to_handle] current_path.append(part_to_handle) return", "schema_merge(a, b, ('foo',)) {'$ref': 'a', 'foo': 'b'} >>> a, b = {'$ref': 'a'},", "e: LOG.debug(\"Malformed Schema or incorrect path provided\\n%s\\n%s\", path, e) return {} def traverse_path_for_sequence_members(", "('foo', 'bar', 2)]) >>> traverse_path_for_sequence_members({\"foo\": {\"bar\": [{\"baz\": 1, \"bin\": 1}, {\"baz\": 2, \"bin\":", "target_set = to_set(target_schema) target[TYPE] = target_set | src_set try: # check if there", "paths for all sequence members in the document according to the reference. Since", "('foo',)) {'type': OrderedSet(['a', 'b', 'c'])} >>> a, b = {'type': OrderedSet(['a', 'b'])}, {'$ref':", "check if $ref is used directly -> # means that we need to", "no validation that the reference is valid. 
Differing from traverse, this returns a", "= ( \"Object at path '{path}' declared multiple values \" \"for '{}': found", "dict, path_parts: Sequence, path: list = None ) -> Tuple[List[object], List[tuple]]: \"\"\"Traverse the", "documents. For each new document: The remaining document is traversed using the remaining", "= \"type\" REF = \"$ref\" UNPACK_SEQUENCE_IDENTIFIER = \"*\" class FlatteningError(Exception): pass def item_hash(", "[\"foo\"]) Traceback (most recent call last): ... KeyError: 'foo' >>> traverse_path_for_sequence_members([], [\"foo\"]) Traceback", "message = message.format(*args, path=self.path) super().__init__(message) class BaseRefPlaceholder: \"\"\"A sentinel object representing a reference", "{\"$ref\": \"#/definitions/bar\"}}}, (\"bar\",)) {'type': 'boolean'} >>> traverse_raw_schema({\"definitions\":{\"b\":[1],\"f\":{\"properties\":{\"b\":{\"$ref\":\"#/definitions/b\"}}}},\"properties\":{\"f\":{\"$ref\":\"#/definitions/f\"}}},(\"f\", \"b\")) # noqa: B950 [1] >>>", "new_documents.extend(new_document) for i in range(len(new_documents)): # pylint: disable=consider-using-enumerate new_document = new_documents[i] newer_documents, newer_paths", "target[TYPE] = target_set | src_set try: # check if there are conflicting $ref", "schema.get(\"definitions\", {}) sub_properties = properties last_step = ( len(path) - 1 ) #", "inside the base document. BASE = BaseRefPlaceholder() def rewrite_ref(ref): \"\"\"Rewrite a reference to", "a unique pointer inside the base document is made by namespacing the reference", "{'foo': {'bar': [42]}}) >>> traverse({\"foo\": {\"bar\": [42]}}, (\"foo\", \"bar\")) ([42], ('foo', 'bar'), {'bar':", "are duplicate keys, src will overwrite target. :raises TypeError: either schema is not", "44]], [('foo', 'bar')]) >>> traverse_path_for_sequence_members({\"foo\": {\"bar\": [42, 43, 44]}}, (\"foo\", \"bar\", \"*\")) ([42,", "or target[REF] else: target_schema = target[key] # carry over existing properties except KeyError:", "b, ('foo',)) {'type': OrderedSet(['a', 'b', 'c'])} >>> a, b = {'$ref': 'a'}, {'type':", "\"\"\"Readable representation for debugging. >>> repr(BaseRefPlaceholder()) '<BASE>' \"\"\" return \"<BASE>\" #: The sentinel", "$ref in subschema on the top level # [-1] $ref must follow #/definitions/object", "self.path = fragment_encode(path) message = message.format(*args, path=self.path) super().__init__(message) class BaseRefPlaceholder: \"\"\"A sentinel object", "List[tuple]]: \"\"\" Check the new path part for the unpack sequence identifier (e.g.", "\"for '{}': found '{}' and '{}'\" ) # pylint: disable=W0707 raise ConstraintError(msg, path,", "ValueError, LookupError: the reference is invalid for this document >>> traverse({\"foo\": {\"bar\": [42]}},", "There is no validation that the reference is valid. :raises ValueError, LookupError: the", "raw json schema resolving $ref :raises TypeError: either schema is not of type", "last_step: bool = False): # resolve $ref ref = nested_lookup(REF, sub_schema) # should", "try: properties = schema[\"properties\"] definitions = schema.get(\"definitions\", {}) sub_properties = properties last_step =", "next_path) except TypeError: if key in (TYPE, REF): # combining multiple $ref and", "44], [('foo', 'bar', 0), ('foo', 'bar', 1), ('foo', 'bar', 2)]) >>> traverse_path_for_sequence_members({\"foo\": {\"bar\":", "schema_merge(a, b, ('foo',)) {'type': OrderedSet(['a', 'b'])} >>> a, b = {'$ref': 'a'}, {'type':", "are simply encoded into a pointer. 
If the reference is outside of the", "\"<BASE>\" #: The sentinel instance representing a reference inside the base document. BASE", "'foo' >>> traverse([], [0]) Traceback (most recent call last): ... IndexError: list index", "= [] new_documents = [] new_paths = [] for sequence_index in range(len(current_document)): new_paths.append(current_path.copy()", "import logging from collections.abc import Mapping, Sequence from typing import Any, List, Tuple", "must be dictionaries\") for key, src_schema in src.items(): try: if key in (", "deeper traversal than requested for step in path: sub_properties = _resolve_ref( sub_properties[step], definitions,", "The next path part is added to the traversed path \"\"\" sequence_part =", "fragment_decode, fragment_encode LOG = logging.getLogger(__name__) NON_MERGABLE_KEYS = (\"uniqueItems\", \"insertionOrder\") TYPE = \"type\" REF", "target.pop(REF) except KeyError: pass elif key == \"required\": target[key] = sorted(set(target_schema) | set(src_schema))", "rewrite_ref(ref): \"\"\"Rewrite a reference to be inside of the base document. A relative", "[{\"baz\": 1, \"bin\": 1}, {\"baz\": 2, \"bin\": 2}]}}, (\"foo\", \"bar\", \"*\")) ([{'baz': 1,", "OrderedSet(['string', 'integer'])} \"\"\" if not (isinstance(target, Mapping) and isinstance(src, Mapping)): raise TypeError(\"Both schemas", "List[tuple]]: \"\"\" Handling a non-sequence member for `traverse_path_for_sequence_members` is like the loop block", "to check definition # otherwise it's an array and return subschema return sub_schema", "at path '{path}' declared multiple values \" \"for '{}': found '{}' and '{}'\"", "traverse_raw_schema({\"properties\": {\"bar\": [42]}}, (\"bar\",)) [42] >>> traverse_raw_schema({\"definitions\": {\"bar\": {\"type\": \"boolean\"}},\"properties\": {\"bar\": {\"$ref\": \"#/definitions/bar\"}}},", "[42]}}, (\"foo\", \"bar\")) ([42], ('foo', 'bar'), {'bar': [42]}) >>> traverse({\"foo\": {\"bar\": [42]}}, (\"foo\",", "dict, definitions: dict, last_step: bool = False): # resolve $ref ref = nested_lookup(REF,", "import json import logging from collections.abc import Mapping, Sequence from typing import Any,", "= [] new_paths = [] for sequence_index in range(len(current_document)): new_paths.append(current_path.copy() + [sequence_index]) new_document", "sequence members in the document according to the reference. Since the document is", "list ) -> Tuple[List[object], List[tuple]]: \"\"\" When unpacking a sequence, we need to", "tuple()) ([{'foo': {'bar': [42, 43, 44]}}], [()]) >>> traverse_path_for_sequence_members({\"foo\": {\"bar\": [42, 43, 44]}},", "used directly -> # means that we need to check definition # otherwise", "'a'}, {'type': 'b'} >>> schema_merge(a, b, ('foo',)) {'type': OrderedSet(['a', 'b'])} >>> a, b", "document paths to traverse :parameter path: traversed path so far :raises ValueError, LookupError:", "remaining path parts. The list of traversed documents and traversed paths are returned.", "the reference under the remote base name inside the remote section. >>> rewrite_ref((BASE,", "the traversed path. The traversal continues by recursively calling `traverse_path_for_sequence_members` \"\"\" part_to_handle =", "base document. 
A relative JSON pointer is returned (in URI fragment identifier representation).", "is not of type dict :raises ConstraintError: the schema tries to override \"type\"", "path) def _handle_non_sequence_for_traverse( current_document: dict, current_path_parts: list, current_path: list ) -> Tuple[List[object], List[tuple]]:", "schema_merge(a, b, ('foo',)) {'Foo': {'type': OrderedSet(['a', 'b'])}} >>> schema_merge({'type': 'a'}, {'type': 'b'}, ())", "rewrite_ref((BASE, \"foo\", \"bar\")) '#/foo/bar' >>> rewrite_ref((BASE,)) '#' >>> rewrite_ref((\"remote\", \"foo\", \"bar\")) '#/remote/remote/foo/bar' >>>", "for k, v in item.items()} if isinstance(item, list): item = [item_hash(i) for i", "= {'$ref': 'a'}, {'foo': 'b'} >>> schema_merge(a, b, ('foo',)) {'$ref': 'a', 'foo': 'b'}", "parent = None path = [] for part in path_parts: if isinstance(document, Sequence):", "sequence) using the new path part as the index. The next path part", "= [] resolved_paths = [] new_documents = [] new_paths = [] for sequence_index", "document: dict, path_parts: Sequence, path: list = None ) -> Tuple[List[object], List[tuple]]: \"\"\"Traverse", "document is obtained from the current document using the new path part as", "document is traversed using the remaining path parts. The list of traversed documents", "already inside the base document (:const:`BASE`), the parts are simply encoded into a", "# [0] should be a single $ref in subschema on the top level", "{'type': 'integer'}, ()) {'type': OrderedSet(['string', 'integer'])} \"\"\" if not (isinstance(target, Mapping) and isinstance(src,", "validation that the reference is valid. :raises ValueError, LookupError: the reference is invalid", "sequence, we need to include multiple paths and multiple documents, one for each", "and '{}'\" ) # pylint: disable=W0707 raise ConstraintError(msg, path, key, target_schema, src_schema) target[key]", ")[0] new_documents.extend(new_document) for i in range(len(new_documents)): # pylint: disable=consider-using-enumerate new_document = new_documents[i] newer_documents,", "sort_keys=True).encode() dhash.update(encoded) return dhash.hexdigest() def to_set(value: Any) -> OrderedSet: return ( OrderedSet(value) if", "_resolve_ref(sub_schema: dict, definitions: dict, last_step: bool = False): # resolve $ref ref =", "0), ('foo', 'bar', 1), ('foo', 'bar', 2)]) >>> traverse_path_for_sequence_members({\"foo\": {\"bar\": [{\"baz\": 1, \"bin\":", "= {k: item_hash(v) for k, v in item.items()} if isinstance(item, list): item =", "= [] for part in path_parts: if isinstance(document, Sequence): part = int(part) parent", "# casting to ordered set as lib # implicitly converts strings to sets", "base is discarded. There is no validation that the reference is valid. Differing", "== \"required\": target[key] = sorted(set(target_schema) | set(src_schema)) else: if key in NON_MERGABLE_KEYS and", "document document = document[part] path.append(part) return document, tuple(path), parent def _resolve_ref(sub_schema: dict, definitions:", "and REF not in sub_schema: # dont traverse deeper than requested # check", "member. For each sequence member: Append the traversed paths w/ the sequence index,", "single value) # bc sub_schema is always per paranet property # (taken from", "collections.abc import Mapping, Sequence from typing import Any, List, Tuple from nested_lookup import", "using the new path part as the index. The next path part is", "if there are conflicting $ref and type # at the same sub schema.", "at the same sub schema. 
Conflicting $ref could only # happen on combiners", "traverse([], [\"foo\"]) Traceback (most recent call last): ... ValueError: invalid literal for int()", "schema_merge({'required': 'a'}, {'required': 'b'}, ()) {'required': ['a', 'b']} >>> a, b = {'$ref':", ">>> schema_merge({'type': 'a'}, {'type': 'b'}, ()) # doctest: +NORMALIZE_WHITESPACE {'type': OrderedSet(['a', 'b'])} >>>", "-> Tuple[List[object], List[tuple]]: \"\"\" Handling a non-sequence member for `traverse_path_for_sequence_members` is like the", "the reference is already inside the base document (:const:`BASE`), the parts are simply", "path_parts: if isinstance(document, Sequence): part = int(part) parent = document document = document[part]", "of resolved paths. :parameter document: document to traverse (dict or list) :parameter path_parts:", "and isinstance(src, Mapping)): raise TypeError(\"Both schemas must be dictionaries\") for key, src_schema in", "properties: sub_schema = properties[0] return sub_schema # pylint: disable=C0301 def traverse_raw_schema(schema: dict, path:", "None) >>> traverse({\"foo\": {\"bar\": [42]}}, [\"foo\"]) ({'bar': [42]}, ('foo',), {'foo': {'bar': [42]}}) >>>", "If the reference is outside of the base document, a unique pointer inside", "traverse_raw_schema([], [\"foo\"]) Traceback (most recent call last): ... TypeError: Schema must be a", "'b'}} >>> schema_merge(a, b, ('foo',)) {'Foo': {'type': OrderedSet(['a', 'b'])}} >>> schema_merge({'type': 'a'}, {'type':", "'bar')]) >>> traverse_path_for_sequence_members({\"foo\": {\"bar\": [42, 43, 44]}}, (\"foo\", \"bar\", \"*\")) ([42, 43, 44],", "(), None) >>> traverse({\"foo\": {\"bar\": [42]}}, [\"foo\"]) ({'bar': [42]}, ('foo',), {'foo': {'bar': [42]}})", "# check if $ref is used directly -> # means that we need", "must be a dictionary\") try: properties = schema[\"properties\"] definitions = schema.get(\"definitions\", {}) sub_properties", "the list of path parts. The new document is obtained from the current", "Handling a non-sequence member for `traverse_path_for_sequence_members` is like the loop block in `traverse`:", "next_path = path + (key,) try: target[key] = schema_merge(target_schema, src_schema, next_path) except TypeError:", "last_step and REF not in sub_schema: # dont traverse deeper than requested #", "should be a single $ref in subschema on the top level # [-1]", "BASE: parts = [\"remote\", base] + parts return fragment_encode(parts) def traverse(document, path_parts): \"\"\"Traverse", "path part is added to the traversed path \"\"\" sequence_part = current_path_parts.pop(0) if", "returns a list of documents and a list of resolved paths. :parameter document:", "[tuple(current_path)] def _handle_unpack_sequence_for_traverse( current_document: Sequence, current_path_parts: list, current_path: list ) -> Tuple[List[object], List[tuple]]:", "return ( OrderedSet(value) if isinstance(value, (list, OrderedSet)) else OrderedSet([value]) ) class ConstraintError(FlatteningError, ValueError):", "item = [item_hash(i) for i in item].sort() encoded = json.dumps(item, sort_keys=True).encode() dhash.update(encoded) return", "({'bar': [42]}, ('foo',), {'foo': {'bar': [42]}}) >>> traverse({\"foo\": {\"bar\": [42]}}, (\"foo\", \"bar\")) ([42],", "the current document using the new path part as the key. The next", "there are duplicate keys, src will overwrite target. 
:raises TypeError: either schema is", "document >>> traverse_path_for_sequence_members({\"foo\": {\"bar\": [42, 43, 44]}}, tuple()) ([{'foo': {'bar': [42, 43, 44]}}],", "for sequence_index in range(len(current_document)): new_paths.append(current_path.copy() + [sequence_index]) new_document = traverse_path_for_sequence_members( current_document, [sequence_index] +", "[] for part in path_parts: if isinstance(document, Sequence): part = int(part) parent =", "[tuple(path)] path_parts = list(path_parts) if not isinstance(document, Sequence): return _handle_non_sequence_for_traverse(document, path_parts, path) return", "The remaining document is traversed using the remaining path parts. The list of", "['a', 'b']} >>> a, b = {'$ref': 'a'}, {'foo': 'b'} >>> schema_merge(a, b,", "member for `traverse_path_for_sequence_members` is like the loop block in `traverse`: The next path", "sub_properties[step], definitions, last_step=path.index(step) == last_step, ) return sub_properties except KeyError as e: LOG.debug(\"Malformed", "will overwrite target. :raises TypeError: either schema is not of type dict :raises", "$ref could only # happen on combiners because method merges two json #", "path_parts: document paths to traverse :parameter path: traversed path so far :raises ValueError,", "'bar', 1, 'baz')]) >>> traverse_path_for_sequence_members({}, [\"foo\"]) Traceback (most recent call last): ... KeyError:", "isinstance(document, Sequence): return _handle_non_sequence_for_traverse(document, path_parts, path) return _handle_sequence_for_traverse(document, path_parts, path) def _handle_non_sequence_for_traverse( current_document:", "is outside of the base document, a unique pointer inside the base document", "of new documents. For each new document: The remaining document is traversed using", "'{}'\" ) # pylint: disable=W0707 raise ConstraintError(msg, path, key, target_schema, src_schema) target[key] =", "'c'} >>> schema_merge(a, b, ('foo',)) {'type': OrderedSet(['a', 'b', 'c'])} >>> a, b =", "include multiple paths and multiple documents, one for each sequence member. For each", ") # otherwise, sequence part should be a valid index current_sequence_part = int(sequence_part)", "(Dictionary/Iterable/Scalar)\"\"\" dhash = hashlib.md5() # nosec if isinstance(item, dict): item = {k: item_hash(v)", "the first part in the list of path parts. The new document is", "base document (:const:`BASE`), the parts are simply encoded into a pointer. If the", "multiple paths and multiple documents, one for each sequence member. For each sequence", "hash for an item (Dictionary/Iterable/Scalar)\"\"\" dhash = hashlib.md5() # nosec if isinstance(item, dict):", "the reference is valid. Differing from traverse, this returns a list of documents", "a, b = {'type': OrderedSet(['a', 'b'])}, {'$ref': 'c'} >>> schema_merge(a, b, ('foo',)) {'type':", "= current_document[part_to_handle] current_path.append(part_to_handle) return traverse_path_for_sequence_members( current_document, current_path_parts, current_path ) def _handle_sequence_for_traverse( current_document: Sequence,", "path, e) return {} def traverse_path_for_sequence_members( document: dict, path_parts: Sequence, path: list =", "the new path part for the unpack sequence identifier (e.g. '*'), otherwise traverse", "if not isinstance(schema, Mapping): raise TypeError(\"Schema must be a dictionary\") try: properties =", "path_parts): \"\"\"Traverse the document according to the reference. 
Since the document is presumed", "('foo', 'bar', 1), ('foo', 'bar', 2)]) >>> traverse_path_for_sequence_members({\"foo\": {\"bar\": [{\"baz\": 1, \"bin\": 1},", "src will overwrite target. :raises TypeError: either schema is not of type dict", "target_schema = target[key] # carry over existing properties except KeyError: target[key] = src_schema", "return subschema return sub_schema if ref: # [0] should be a single $ref", "new_document = traverse_path_for_sequence_members( current_document, [sequence_index] + current_path_parts, current_path.copy() )[0] new_documents.extend(new_document) for i in", "'bar', 1)]) >>> traverse_path_for_sequence_members({\"foo\": {\"bar\": [{\"baz\": 1, \"bin\": 1}, {\"baz\": 2, \"bin\": 2}]}},", "43, 44]}], [('foo',)]) >>> traverse_path_for_sequence_members({\"foo\": {\"bar\": [42, 43, 44]}}, (\"foo\", \"bar\")) ([[42, 43,", "for int() with base 10: 'foo' >>> traverse([], [0]) Traceback (most recent call", "(most recent call last): ... TypeError: Schema must be a dictionary \"\"\" if", "disable=R0912 \"\"\"Merges the src schema into the target schema in place. If there", "TypeError(\"Both schemas must be dictionaries\") for key, src_schema in src.items(): try: if key", "reference is valid. :raises ValueError, LookupError: the reference is invalid for this document", "2, 'bin': 2}], [('foo', 'bar', 0), ('foo', 'bar', 1)]) >>> traverse_path_for_sequence_members({\"foo\": {\"bar\": [{\"baz\":", "[{\"$ref\": \"..#1..\"},{\"$ref\": \"..#2..\"}] -> # { \"ref\": \"..#1..\", \"type\": [{},{}] } target.pop(REF) except", "than requested for step in path: sub_properties = _resolve_ref( sub_properties[step], definitions, last_step=path.index(step) ==", "\"\"\"Merges the src schema into the target schema in place. If there are", "# check if there are conflicting $ref and type # at the same", "Tuple[List[object], List[tuple]]: \"\"\" Handling a non-sequence member for `traverse_path_for_sequence_members` is like the loop", "{\"bar\": [42]}}, tuple()) {'bar': [42]} >>> traverse_raw_schema({\"properties\": {\"bar\": [42]}}, (\"bar\",)) [42] >>> traverse_raw_schema({\"definitions\":", "from the current document using the new path part as the key. The", ">>> a, b = {'$ref': 'a'}, {'type': OrderedSet(['b', 'c'])} >>> schema_merge(a, b, ('foo',))", "converts strings to sets target[TYPE] |= src_set except (TypeError, KeyError): target_set = to_set(target_schema)", "for each sequence member. For each sequence member: Append the traversed paths w/", "1, 'baz')]) >>> traverse_path_for_sequence_members({}, [\"foo\"]) Traceback (most recent call last): ... KeyError: 'foo'", "$ref and types src_set = to_set(src_schema) try: target[TYPE] = to_set( target[TYPE] ) #", "new document: The remaining document is traversed using the remaining path parts. The", "| set(src_schema)) else: if key in NON_MERGABLE_KEYS and target_schema != src_schema: msg =", "= None ) -> Tuple[List[object], List[tuple]]: \"\"\"Traverse the paths for all sequence members", "set as lib # implicitly converts strings to sets target[TYPE] |= src_set except", "prevent deeper traversal than requested for step in path: sub_properties = _resolve_ref( sub_properties[step],", "{'type': OrderedSet(['string', 'integer'])} \"\"\" if not (isinstance(target, Mapping) and isinstance(src, Mapping)): raise TypeError(\"Both", "from nested_lookup import nested_lookup from ordered_set import OrderedSet from .pointer import fragment_decode, fragment_encode", "resolved paths. 
:parameter document: document to traverse (dict or list) :parameter path_parts: document", ">>> schema_merge(a, b, ('foo',)) {'type': OrderedSet(['a', 'b', 'c'])} >>> a, b = {'type':", "index current_sequence_part = int(sequence_part) current_document = current_document[current_sequence_part] current_path.append(current_sequence_part) return [current_document], [tuple(current_path)] def _handle_unpack_sequence_for_traverse(", "document is obtained from the current document (a sequence) using the new path", "new document is appended to the list of new documents. For each new", "the top level # [-1] $ref must follow #/definitions/object sub_schema = definitions[fragment_decode(ref[0])[-1]] #", "index. The new document is appended to the list of new documents. For", "{'type': OrderedSet(['a', 'b', 'c'])} >>> a, b = {'type': ['a', 'b']}, {'$ref': 'c'}", "recent call last): ... KeyError: 'foo' >>> traverse([], [\"foo\"]) Traceback (most recent call", "be the reference's base, the base is discarded. There is no validation that", "The new document is obtained from the current document using the new path", "\"..#2..\"}] -> # { \"ref\": \"..#1..\", \"type\": [{},{}] } target.pop(REF) except KeyError: pass", "\"bin\": 2}]}}, (\"foo\", \"bar\", \"*\", \"baz\")) ([1, 2], [('foo', 'bar', 0, 'baz'), ('foo',", ">>> traverse_path_for_sequence_members({\"foo\": {\"bar\": [42, 43, 44]}}, (\"foo\", \"bar\", \"*\")) ([42, 43, 44], [('foo',", "\"type\": [{},{}] } target.pop(REF) except KeyError: pass elif key == \"required\": target[key] =", "{ \"ref\": \"..#1..\", \"type\": [{},{}] } target.pop(REF) except KeyError: pass elif key ==", "TypeError: Schema must be a dictionary \"\"\" if not isinstance(schema, Mapping): raise TypeError(\"Schema", ">>> schema_merge(a, b, ('foo',)) {'$ref': 'a', 'foo': 'b'} >>> a, b = {'$ref':", "path part as the index. The next path part is added to the", "Since the document is presumed to be the reference's base, the base is", "\"*\")) ([{'baz': 1, 'bin': 1}, {'baz': 2, 'bin': 2}], [('foo', 'bar', 0), ('foo',", "that the reference is valid. Differing from traverse, this returns a list of", "[42, 43, 44]}}, tuple()) ([{'foo': {'bar': [42, 43, 44]}}], [()]) >>> traverse_path_for_sequence_members({\"foo\": {\"bar\":", "except TypeError: if key in (TYPE, REF): # combining multiple $ref and types", "{'$ref': 'a'}, {'$ref': 'b'} >>> schema_merge(a, b, ('foo',)) {'type': OrderedSet(['a', 'b'])} >>> a,", "`traverse_path_for_sequence_members` \"\"\" part_to_handle = current_path_parts.pop(0) current_document = current_document[part_to_handle] current_path.append(part_to_handle) return traverse_path_for_sequence_members( current_document, current_path_parts,", "that we need to check definition # otherwise it's an array and return", "else: if key in NON_MERGABLE_KEYS and target_schema != src_schema: msg = ( \"Object", "b, ('foo',)) {'Foo': {'type': OrderedSet(['a', 'b'])}} >>> schema_merge({'type': 'a'}, {'type': 'b'}, ()) #", "current_path_parts.pop(0) current_document = current_document[part_to_handle] current_path.append(part_to_handle) return traverse_path_for_sequence_members( current_document, current_path_parts, current_path ) def _handle_sequence_for_traverse(", "losing any previous info: # e.g. \"oneOf\": [{\"$ref\": \"..#1..\"},{\"$ref\": \"..#2..\"}] -> # {", "list of traversed documents and traversed paths are returned. 
\"\"\" documents = []", "input is only json comparable type (dict/list/scalar) \"\"\"MD5 hash for an item (Dictionary/Iterable/Scalar)\"\"\"", "44]}}, (\"foo\", \"bar\")) ([[42, 43, 44]], [('foo', 'bar')]) >>> traverse_path_for_sequence_members({\"foo\": {\"bar\": [42, 43,", "current document using the new path part as the key. The next path", "{}, ()) {'foo': 'a'} >>> schema_merge({}, {'foo': 'a'}, ()) {'foo': 'a'} >>> schema_merge({'foo':", "to the list of new documents. For each new document: The remaining document", "_handle_sequence_for_traverse(document, path_parts, path) def _handle_non_sequence_for_traverse( current_document: dict, current_path_parts: list, current_path: list ) ->", "'b', 'c'])} >>> a, b = {'type': OrderedSet(['a', 'b'])}, {'$ref': 'c'} >>> schema_merge(a,", "target[key] # carry over existing properties except KeyError: target[key] = src_schema else: next_path", "schema[\"properties\"] definitions = schema.get(\"definitions\", {}) sub_properties = properties last_step = ( len(path) -", "URI fragment identifier representation). If the reference is already inside the base document", "{'bar': [42, 43, 44]}}], [()]) >>> traverse_path_for_sequence_members({\"foo\": {\"bar\": [42, 43, 44]}}, [\"foo\"]) ([{'bar':", "\"foo\", \"bar\")) '#/remote/remote/foo/bar' >>> rewrite_ref((\"remote\",)) '#/remote/remote' \"\"\" base, *parts = ref if base", "ValueError: invalid literal for int() with base 10: 'foo' >>> traverse_path_for_sequence_members([], [0]) Traceback", "if path is None: path = [] if not path_parts: return [document], [tuple(path)]", "list of documents and a list of resolved paths. :parameter document: document to", "from collections.abc import Mapping, Sequence from typing import Any, List, Tuple from nested_lookup", "(\"foo\", \"bar\", \"*\")) ([42, 43, 44], [('foo', 'bar', 0), ('foo', 'bar', 1), ('foo',", "multiple values \" \"for '{}': found '{}' and '{}'\" ) # pylint: disable=W0707", "'b'} >>> schema_merge(a, b, ('foo',)) {'$ref': 'a', 'foo': 'b'} >>> a, b =", "path part is added to the traversed path. The traversal continues by recursively", "the remaining path parts. The list of traversed documents and traversed paths are", "'bar', 0), [42]) >>> traverse({}, [\"foo\"]) Traceback (most recent call last): ... KeyError:", "The new document is appended to the list of new documents. For each", "is like the loop block in `traverse`: The next path part is the", "the same sub schema. 
Conflicting $ref could only # happen on combiners because", "if sequence_part == UNPACK_SEQUENCE_IDENTIFIER: return _handle_unpack_sequence_for_traverse( current_document, current_path_parts, current_path ) # otherwise, sequence", "with base 10: 'foo' >>> traverse_path_for_sequence_members([], [0]) Traceback (most recent call last): ...", "Tuple from nested_lookup import nested_lookup from ordered_set import OrderedSet from .pointer import fragment_decode,", "properties = nested_lookup(\"properties\", sub_schema) if properties: sub_schema = properties[0] return sub_schema # pylint:", "unique pointer inside the base document is made by namespacing the reference under", "OrderedSet: return ( OrderedSet(value) if isinstance(value, (list, OrderedSet)) else OrderedSet([value]) ) class ConstraintError(FlatteningError,", "target_set | src_set try: # check if there are conflicting $ref and type", "= new_documents[i] newer_documents, newer_paths = traverse_path_for_sequence_members( new_document, current_path_parts, new_paths[i] ) documents.extend(newer_documents) resolved_paths.extend(newer_paths) return", "KeyError: 'foo' >>> traverse([], [\"foo\"]) Traceback (most recent call last): ... ValueError: invalid", "far :raises ValueError, LookupError: the reference is invalid for this document >>> traverse_path_for_sequence_members({\"foo\":", "valid. :raises ValueError, LookupError: the reference is invalid for this document >>> traverse({\"foo\":", "paths. :parameter document: document to traverse (dict or list) :parameter path_parts: document paths", "def rewrite_ref(ref): \"\"\"Rewrite a reference to be inside of the base document. A", "path: sub_properties = _resolve_ref( sub_properties[step], definitions, last_step=path.index(step) == last_step, ) return sub_properties except", "level # [-1] $ref must follow #/definitions/object sub_schema = definitions[fragment_decode(ref[0])[-1]] # resolve properties", "['b', 'c']} >>> schema_merge(a, b, ('foo',)) {'type': OrderedSet(['a', 'b', 'c'])} >>> a, b", ">>> schema_merge(a, b, ('foo',)) {'Foo': {'type': OrderedSet(['a', 'b'])}} >>> schema_merge({'type': 'a'}, {'type': 'b'},", "key == \"required\": target[key] = sorted(set(target_schema) | set(src_schema)) else: if key in NON_MERGABLE_KEYS", "class BaseRefPlaceholder: \"\"\"A sentinel object representing a reference inside the base document.\"\"\" def", "'#/remote/remote/foo/bar' >>> rewrite_ref((\"remote\",)) '#/remote/remote' \"\"\" base, *parts = ref if base is not", "the new path part as the index. The next path part is added", "The next path part is added to the traversed path. The traversal continues", "#/definitions/object sub_schema = definitions[fragment_decode(ref[0])[-1]] # resolve properties properties = nested_lookup(\"properties\", sub_schema) if properties:", "type (dict/list/scalar) \"\"\"MD5 hash for an item (Dictionary/Iterable/Scalar)\"\"\" dhash = hashlib.md5() # nosec", "= properties last_step = ( len(path) - 1 ) # get amount of", "('foo', 'bar', 1, 'baz')]) >>> traverse_path_for_sequence_members({}, [\"foo\"]) Traceback (most recent call last): ...", "traversed paths are returned. \"\"\" documents = [] resolved_paths = [] new_documents =", "'foo' >>> traverse([], [\"foo\"]) Traceback (most recent call last): ... ValueError: invalid literal", "('foo', 'bar'), {'bar': [42]}) >>> traverse({\"foo\": {\"bar\": [42]}}, (\"foo\", \"bar\", \"0\")) (42, ('foo',", "key. The next path part is added to the traversed path. 
The traversal", "ValueError: invalid literal for int() with base 10: 'foo' >>> traverse([], [0]) Traceback", "current_path_parts, current_path ) # otherwise, sequence part should be a valid index current_sequence_part", "raise TypeError(\"Schema must be a dictionary\") try: properties = schema[\"properties\"] definitions = schema.get(\"definitions\",", "'a'} >>> schema_merge({}, {'foo': 'a'}, ()) {'foo': 'a'} >>> schema_merge({'foo': 'a'}, {'foo': 'b'},", ">>> schema_merge({}, {}, ()) {} >>> schema_merge({'foo': 'a'}, {}, ()) {'foo': 'a'} >>>", "ordered_set import OrderedSet from .pointer import fragment_decode, fragment_encode LOG = logging.getLogger(__name__) NON_MERGABLE_KEYS =", "combiners because method merges two json # objects without losing any previous info:", "unified target_schema = target.get(key) or target.get(TYPE) or target[REF] else: target_schema = target[key] #", "and unified target_schema = target.get(key) or target.get(TYPE) or target[REF] else: target_schema = target[key]", "{\"bar\": [42]}}, (\"bar\",)) [42] >>> traverse_raw_schema({\"definitions\": {\"bar\": {\"type\": \"boolean\"}},\"properties\": {\"bar\": {\"$ref\": \"#/definitions/bar\"}}}, (\"bar\",))", "dict :raises ConstraintError: the schema tries to override \"type\" or \"$ref\" >>> schema_merge({},", "no validation that the reference is valid. :raises ValueError, LookupError: the reference is", "reference is invalid for this document >>> traverse({\"foo\": {\"bar\": [42]}}, tuple()) ({'foo': {'bar':", "If there are duplicate keys, src will overwrite target. :raises TypeError: either schema", "type # at the same sub schema. Conflicting $ref could only # happen", "for the unpack sequence identifier (e.g. '*'), otherwise traverse index and continue: The", "= {'$ref': 'a'}, {'$ref': 'b'} >>> schema_merge(a, b, ('foo',)) {'type': OrderedSet(['a', 'b'])} >>>", "the src schema into the target schema in place. If there are duplicate", "traverse_path_for_sequence_members( document: dict, path_parts: Sequence, path: list = None ) -> Tuple[List[object], List[tuple]]:", "info: # e.g. \"oneOf\": [{\"$ref\": \"..#1..\"},{\"$ref\": \"..#2..\"}] -> # { \"ref\": \"..#1..\", \"type\":", "{'type': OrderedSet(['b', 'c'])} >>> schema_merge(a, b, ('foo',)) {'type': OrderedSet(['a', 'b', 'c'])} >>> a,", "conflicting $ref and type # at the same sub schema. Conflicting $ref could", "sets target[TYPE] |= src_set except (TypeError, KeyError): target_set = to_set(target_schema) target[TYPE] = target_set", "path_parts, path) return _handle_sequence_for_traverse(document, path_parts, path) def _handle_non_sequence_for_traverse( current_document: dict, current_path_parts: list, current_path:", "({'foo': {'bar': [42]}}, (), None) >>> traverse({\"foo\": {\"bar\": [42]}}, [\"foo\"]) ({'bar': [42]}, ('foo',),", ") # get amount of steps to prevent deeper traversal than requested for", "= to_set(src_schema) try: target[TYPE] = to_set( target[TYPE] ) # casting to ordered set", "44]}}], [()]) >>> traverse_path_for_sequence_members({\"foo\": {\"bar\": [42, 43, 44]}}, [\"foo\"]) ([{'bar': [42, 43, 44]}],", "document. 
def rewrite_ref(ref):
    """Rewrite a reference to be inside of the base document. A relative JSON
    pointer is returned (in URI fragment identifier representation).

    If the reference is already inside the base document (:const:`BASE`), the
    parts are simply encoded into a pointer.

    If the reference is outside of the base document, a unique pointer inside
    the base document is made by namespacing the reference under the remote
    base name inside the remote section.

    >>> rewrite_ref((BASE, "foo", "bar"))
    '#/foo/bar'
    >>> rewrite_ref((BASE,))
    '#'
    >>> rewrite_ref(("remote", "foo", "bar"))
    '#/remote/remote/foo/bar'
    >>> rewrite_ref(("remote",))
    '#/remote/remote'
    """
    base, *parts = ref
    if base is not BASE:
        parts = ["remote", base] + parts
    return fragment_encode(parts)


def traverse(document, path_parts):
    """Traverse the document according to the reference.

    Since the document is presumed to be the reference's base, the base is
    discarded. There is no validation that the reference is valid.

    :raises ValueError, LookupError: the reference is invalid for this document

    >>> traverse({"foo": {"bar": [42]}}, tuple())
    ({'foo': {'bar': [42]}}, (), None)
    >>> traverse({"foo": {"bar": [42]}}, ["foo"])
    ({'bar': [42]}, ('foo',), {'foo': {'bar': [42]}})
    >>> traverse({"foo": {"bar": [42]}}, ("foo", "bar"))
    ([42], ('foo', 'bar'), {'bar': [42]})
    >>> traverse({"foo": {"bar": [42]}}, ("foo", "bar", "0"))
    (42, ('foo', 'bar', 0), [42])
    >>> traverse({}, ["foo"])
    Traceback (most recent call last):
    ...
    KeyError: 'foo'
    >>> traverse([], ["foo"])
    Traceback (most recent call last):
    ...
    ValueError: invalid literal for int() with base 10: 'foo'
    >>> traverse([], [0])
    Traceback (most recent call last):
    ...
    IndexError: list index out of range
    """
    parent = None
    path = []
    for part in path_parts:
        if isinstance(document, Sequence):
            part = int(part)
        parent = document
        document = document[part]
        path.append(part)
    return document, tuple(path), parent

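# Usage sketch (illustrative only; the helper name and sample document below
# are hypothetical, not part of this module's API). It shows how the
# (value, path, parent) triple returned by `traverse` can be used to rewrite
# the referenced location in place.
def _example_rewrite_leaf():
    document = {"foo": {"bar": [42]}}
    value, path, parent = traverse(document, ("foo", "bar", "0"))
    assert (value, path) == (42, ("foo", "bar", 0))
    # `parent` is the actual list inside `document`, so assigning through it
    # mutates the original document
    parent[path[-1]] = 43
    return document  # {"foo": {"bar": [43]}}
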
def _resolve_ref(sub_schema: dict, definitions: dict, last_step: bool = False):
    # resolve $ref
    ref = nested_lookup(REF, sub_schema)
    # should be safe (always single value)
    # bc sub_schema is always per paranet property
    # (taken from definitions)
    if last_step and REF not in sub_schema:
        # dont traverse deeper than requested
        # check if $ref is used directly ->
        # means that we need to check definition
        return sub_schema
    if ref:
        # [0] should be a single $ref in subschema on the top level
        # [-1] $ref must follow #/definitions/object
        sub_schema = definitions[fragment_decode(ref[0])[-1]]
    # resolve properties
    properties = nested_lookup("properties", sub_schema)
    if properties:
        sub_schema = properties[0]
    return sub_schema


# pylint: disable=C0301
def traverse_raw_schema(schema: dict, path: tuple):
    """Traverse the raw json schema resolving $ref

    :raises TypeError: either schema is not of type dict
    :raises ConstraintError: the schema tries to override "type" or "$ref"

    >>> traverse_raw_schema({"properties": {"bar": [42]}}, tuple())
    {'bar': [42]}
    >>> traverse_raw_schema({"properties": {"bar": [42]}}, ("bar",))
    [42]
    >>> traverse_raw_schema({"definitions": {"bar": {"type": "boolean"}},"properties": {"bar": {"$ref": "#/definitions/bar"}}}, ("bar",))
    {'type': 'boolean'}
    >>> traverse_raw_schema({"definitions":{"b":[1],"f":{"properties":{"b":{"$ref":"#/definitions/b"}}}},"properties":{"f":{"$ref":"#/definitions/f"}}},("f", "b"))  # noqa: B950
    [1]
    >>> traverse_raw_schema({}, ("foo"))
    {}
    >>> traverse_raw_schema([], ["foo"])
    Traceback (most recent call last):
    ...
    TypeError: Schema must be a dictionary
    """
    if not isinstance(schema, Mapping):
        raise TypeError("Schema must be a dictionary")
    try:
        properties = schema["properties"]
        definitions = schema.get("definitions", {})
        sub_properties = properties
        last_step = (
            len(path) - 1
        )  # get amount of steps to prevent deeper traversal than requested
        for step in path:
            sub_properties = _resolve_ref(
                sub_properties[step],
                definitions,
                last_step=path.index(step) == last_step,
            )
        return sub_properties
    except KeyError as e:
        LOG.debug("Malformed Schema or incorrect path provided\n%s\n%s", path, e)
        return {}

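# Usage sketch (illustrative only): walking a raw provider schema where a
# property points at a definition via $ref. The helper name and the sample
# schema are hypothetical; they mirror the doctest above and show that the
# $ref is resolved transparently while following `path`.
def _example_resolve_nested_property():
    schema = {
        "definitions": {"Tag": {"properties": {"Key": {"type": "string"}}}},
        "properties": {"Tags": {"$ref": "#/definitions/Tag"}},
    }
    # the $ref on "Tags" resolves to the "Tag" definition, whose "properties"
    # are then used to look up the next step, "Key"
    return traverse_raw_schema(schema, ("Tags", "Key"))  # {'type': 'string'}
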
def traverse_path_for_sequence_members(
    document: dict, path_parts: Sequence, path: list = None
) -> Tuple[List[object], List[tuple]]:
    """Traverse the paths for all sequence members in the document according to the reference.

    Since the document is presumed to be the reference's base, the base is discarded.
    There is no validation that the reference is valid.

    Differing from traverse, this returns a list of documents and a list of resolved paths.

    :parameter document: document to traverse (dict or list)
    :parameter path_parts: document paths to traverse
    :parameter path: traversed path so far

    :raises ValueError, LookupError: the reference is invalid for this document

    >>> traverse_path_for_sequence_members({"foo": {"bar": [42, 43, 44]}}, tuple())
    ([{'foo': {'bar': [42, 43, 44]}}], [()])
    >>> traverse_path_for_sequence_members({"foo": {"bar": [42, 43, 44]}}, ["foo"])
    ([{'bar': [42, 43, 44]}], [('foo',)])
    >>> traverse_path_for_sequence_members({"foo": {"bar": [42, 43, 44]}}, ("foo", "bar"))
    ([[42, 43, 44]], [('foo', 'bar')])
    >>> traverse_path_for_sequence_members({"foo": {"bar": [42, 43, 44]}}, ("foo", "bar", "*"))
    ([42, 43, 44], [('foo', 'bar', 0), ('foo', 'bar', 1), ('foo', 'bar', 2)])
    >>> traverse_path_for_sequence_members({"foo": {"bar": [{"baz": 1, "bin": 1}, {"baz": 2, "bin": 2}]}}, ("foo", "bar", "*"))
    ([{'baz': 1, 'bin': 1}, {'baz': 2, 'bin': 2}], [('foo', 'bar', 0), ('foo', 'bar', 1)])
    >>> traverse_path_for_sequence_members({"foo": {"bar": [{"baz": 1, "bin": 1}, {"baz": 2, "bin": 2}]}}, ("foo", "bar", "*", "baz"))
    ([1, 2], [('foo', 'bar', 0, 'baz'), ('foo', 'bar', 1, 'baz')])
    >>> traverse_path_for_sequence_members({}, ["foo"])
    Traceback (most recent call last):
    ...
    KeyError: 'foo'
    >>> traverse_path_for_sequence_members([], ["foo"])
    Traceback (most recent call last):
    ...
    ValueError: invalid literal for int() with base 10: 'foo'
    >>> traverse_path_for_sequence_members([], [0])
    Traceback (most recent call last):
    ...
    IndexError: list index out of range
    """
    if path is None:
        path = []
    if not path_parts:
        return [document], [tuple(path)]
    path_parts = list(path_parts)
    if not isinstance(document, Sequence):
        return _handle_non_sequence_for_traverse(document, path_parts, path)
    return _handle_sequence_for_traverse(document, path_parts, path)


def _handle_non_sequence_for_traverse(
    current_document: dict, current_path_parts: list, current_path: list
) -> Tuple[List[object], List[tuple]]:
    """
    Handling a non-sequence member for `traverse_path_for_sequence_members` is like the loop block in `traverse`:

    The next path part is the first part in the list of path parts.
    The new document is obtained from the current document using the new path part as the key.
    The next path part is added to the traversed path.
    The traversal continues by recursively calling `traverse_path_for_sequence_members`
    """
    part_to_handle = current_path_parts.pop(0)
    current_document = current_document[part_to_handle]
    current_path.append(part_to_handle)
    return traverse_path_for_sequence_members(
        current_document, current_path_parts, current_path
    )


def _handle_sequence_for_traverse(
    current_document: Sequence, current_path_parts: list, current_path: list
) -> Tuple[List[object], List[tuple]]:
    """
    Check the new path part for the unpack sequence identifier (e.g. '*'), otherwise traverse index and continue:

    The new document is obtained from the current document (a sequence) using the new path part as the index.
    The next path part is added to the traversed path
    """
    sequence_part = current_path_parts.pop(0)
    if sequence_part == UNPACK_SEQUENCE_IDENTIFIER:
        return _handle_unpack_sequence_for_traverse(
            current_document, current_path_parts, current_path
        )
    # otherwise, sequence part should be a valid index
    current_sequence_part = int(sequence_part)
    current_document = current_document[current_sequence_part]
    current_path.append(current_sequence_part)
    return [current_document], [tuple(current_path)]


def _handle_unpack_sequence_for_traverse(
    current_document: Sequence, current_path_parts: list, current_path: list
) -> Tuple[List[object], List[tuple]]:
    """
    When unpacking a sequence, we need to include multiple paths and multiple documents,
    one for each sequence member.

    For each sequence member:
    Append the traversed paths w/ the sequence index, and get the new document.
    The new document is obtained by traversing the current document using the sequence index.
    The new document is appended to the list of new documents.

    For each new document:
    The remaining document is traversed using the remaining path parts.

    The list of traversed documents and traversed paths are returned.
    """
    documents = []
    resolved_paths = []
    new_documents = []
    new_paths = []
    for sequence_index in range(len(current_document)):
        new_paths.append(current_path.copy() + [sequence_index])
        new_document = traverse_path_for_sequence_members(
            current_document, [sequence_index] + current_path_parts, current_path.copy()
        )[0]
        new_documents.extend(new_document)
    for i in range(len(new_documents)):  # pylint: disable=consider-using-enumerate
        new_document = new_documents[i]
        newer_documents, newer_paths = traverse_path_for_sequence_members(
            new_document, current_path_parts, new_paths[i]
        )
        documents.extend(newer_documents)
        resolved_paths.extend(newer_paths)
    return documents, resolved_paths

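# Usage sketch (illustrative only): unpacking every member of a sequence with
# the "*" identifier and pairing each resolved value with its concrete path.
# The helper name and the document below are hypothetical.
def _example_unpack_sequence():
    document = {"Tags": [{"Key": "env"}, {"Key": "team"}]}
    values, paths = traverse_path_for_sequence_members(document, ("Tags", "*", "Key"))
    # values -> ['env', 'team']
    # paths  -> [('Tags', 0, 'Key'), ('Tags', 1, 'Key')]
    return dict(zip(paths, values))
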
The new document is", "if last_step and REF not in sub_schema: # dont traverse deeper than requested", "[('foo', 'bar')]) >>> traverse_path_for_sequence_members({\"foo\": {\"bar\": [42, 43, 44]}}, (\"foo\", \"bar\", \"*\")) ([42, 43,", "43, 44], [('foo', 'bar', 0), ('foo', 'bar', 1), ('foo', 'bar', 2)]) >>> traverse_path_for_sequence_members({\"foo\":", "[] for sequence_index in range(len(current_document)): new_paths.append(current_path.copy() + [sequence_index]) new_document = traverse_path_for_sequence_members( current_document, [sequence_index]", "def item_hash( item, ): # assumption -> input is only json comparable type", "target[key] = sorted(set(target_schema) | set(src_schema)) else: if key in NON_MERGABLE_KEYS and target_schema !=", ">>> schema_merge({}, {'foo': 'a'}, ()) {'foo': 'a'} >>> schema_merge({'foo': 'a'}, {'foo': 'b'}, ())", "a reference to be inside of the base document. A relative JSON pointer", "base 10: 'foo' >>> traverse([], [0]) Traceback (most recent call last): ... IndexError:", "OrderedSet(['a', 'b', 'c'])} >>> a, b = {'type': ['a', 'b']}, {'$ref': 'c'} >>>", "{'foo': 'b'} >>> schema_merge({'required': 'a'}, {'required': 'b'}, ()) {'required': ['a', 'b']} >>> a,", "# bc sub_schema is always per paranet property # (taken from definitions) if", "\"\"\" if not isinstance(schema, Mapping): raise TypeError(\"Schema must be a dictionary\") try: properties", "(a sequence) using the new path part as the index. The next path", "OrderedSet(['a', 'b'])}} >>> schema_merge({'type': 'a'}, {'type': 'b'}, ()) # doctest: +NORMALIZE_WHITESPACE {'type': OrderedSet(['a',", "path, *args): self.path = fragment_encode(path) message = message.format(*args, path=self.path) super().__init__(message) class BaseRefPlaceholder: \"\"\"A", "For each sequence member: Append the traversed paths w/ the sequence index, and", "sub_schema # pylint: disable=C0301 def traverse_raw_schema(schema: dict, path: tuple): \"\"\"Traverse the raw json", "# pylint: disable=W0707 raise ConstraintError(msg, path, key, target_schema, src_schema) target[key] = src_schema return", "path: list = None ) -> Tuple[List[object], List[tuple]]: \"\"\"Traverse the paths for all", "schema_merge(target_schema, src_schema, next_path) except TypeError: if key in (TYPE, REF): # combining multiple", "current_document, current_path_parts, current_path ) # otherwise, sequence part should be a valid index", "# happen on combiners because method merges two json # objects without losing", "if isinstance(value, (list, OrderedSet)) else OrderedSet([value]) ) class ConstraintError(FlatteningError, ValueError): def __init__(self, message,", "documents, resolved_paths def schema_merge(target, src, path): # noqa: C901 # pylint: disable=R0912 \"\"\"Merges", "'c'])} >>> a, b = {'Foo': {'$ref': 'a'}}, {'Foo': {'type': 'b'}} >>> schema_merge(a,", "json # objects without losing any previous info: # e.g. 
\"oneOf\": [{\"$ref\": \"..#1..\"},{\"$ref\":", "([1, 2], [('foo', 'bar', 0, 'baz'), ('foo', 'bar', 1, 'baz')]) >>> traverse_path_for_sequence_members({}, [\"foo\"])", "traverse_path_for_sequence_members( current_document, current_path_parts, current_path ) def _handle_sequence_for_traverse( current_document: Sequence, current_path_parts: list, current_path: list", "property # (taken from definitions) if last_step and REF not in sub_schema: #", "[\"foo\"]) ({'bar': [42]}, ('foo',), {'foo': {'bar': [42]}}) >>> traverse({\"foo\": {\"bar\": [42]}}, (\"foo\", \"bar\"))", "'b'])} >>> a, b = {'$ref': 'a'}, {'type': ['b', 'c']} >>> schema_merge(a, b,", "\"\"\"MD5 hash for an item (Dictionary/Iterable/Scalar)\"\"\" dhash = hashlib.md5() # nosec if isinstance(item,", "if key in (TYPE, REF): # combining multiple $ref and types src_set =", "sub_schema is always per paranet property # (taken from definitions) if last_step and", "'bar', 0, 'baz'), ('foo', 'bar', 1, 'baz')]) >>> traverse_path_for_sequence_members({}, [\"foo\"]) Traceback (most recent", "be a dictionary\") try: properties = schema[\"properties\"] definitions = schema.get(\"definitions\", {}) sub_properties =", "('foo',), {'foo': {'bar': [42]}}) >>> traverse({\"foo\": {\"bar\": [42]}}, (\"foo\", \"bar\")) ([42], ('foo', 'bar'),", "object representing a reference inside the base document.\"\"\" def __repr__(self): \"\"\"Readable representation for", "parts. The list of traversed documents and traversed paths are returned. \"\"\" documents", "current_path_parts.pop(0) if sequence_part == UNPACK_SEQUENCE_IDENTIFIER: return _handle_unpack_sequence_for_traverse( current_document, current_path_parts, current_path ) # otherwise,", "be a valid index current_sequence_part = int(sequence_part) current_document = current_document[current_sequence_part] current_path.append(current_sequence_part) return [current_document],", "under the remote base name inside the remote section. >>> rewrite_ref((BASE, \"foo\", \"bar\"))", "0), ('foo', 'bar', 1)]) >>> traverse_path_for_sequence_members({\"foo\": {\"bar\": [{\"baz\": 1, \"bin\": 1}, {\"baz\": 2,", "src.items(): try: if key in ( REF, TYPE, ): # $ref and type", "without losing any previous info: # e.g. \"oneOf\": [{\"$ref\": \"..#1..\"},{\"$ref\": \"..#2..\"}] -> #", "to override \"type\" or \"$ref\" >>> traverse_raw_schema({\"properties\": {\"bar\": [42]}}, tuple()) {'bar': [42]} >>>", "b = {'$ref': 'a'}, {'type': ['b', 'c']} >>> schema_merge(a, b, ('foo',)) {'type': OrderedSet(['a',", "\"*\")) ([42, 43, 44], [('foo', 'bar', 0), ('foo', 'bar', 1), ('foo', 'bar', 2)])", "reference to be inside of the base document. A relative JSON pointer is", "of type dict :raises ConstraintError: the schema tries to override \"type\" or \"$ref\"", "()) {'foo': 'a'} >>> schema_merge({'foo': 'a'}, {'foo': 'b'}, ()) {'foo': 'b'} >>> schema_merge({'required':", "43, 44]], [('foo', 'bar')]) >>> traverse_path_for_sequence_members({\"foo\": {\"bar\": [42, 43, 44]}}, (\"foo\", \"bar\", \"*\"))", "list(path_parts) if not isinstance(document, Sequence): return _handle_non_sequence_for_traverse(document, path_parts, path) return _handle_sequence_for_traverse(document, path_parts, path)", "part is added to the traversed path. The traversal continues by recursively calling", "implicitly converts strings to sets target[TYPE] |= src_set except (TypeError, KeyError): target_set =", "is valid. 
Differing from traverse, this returns a list of documents and a", "int() with base 10: 'foo' >>> traverse([], [0]) Traceback (most recent call last):", "44]}], [('foo',)]) >>> traverse_path_for_sequence_members({\"foo\": {\"bar\": [42, 43, 44]}}, (\"foo\", \"bar\")) ([[42, 43, 44]],", "traverse_path_for_sequence_members({\"foo\": {\"bar\": [{\"baz\": 1, \"bin\": 1}, {\"baz\": 2, \"bin\": 2}]}}, (\"foo\", \"bar\", \"*\"))", "and target_schema != src_schema: msg = ( \"Object at path '{path}' declared multiple", "if base is not BASE: parts = [\"remote\", base] + parts return fragment_encode(parts)", "new_paths = [] for sequence_index in range(len(current_document)): new_paths.append(current_path.copy() + [sequence_index]) new_document = traverse_path_for_sequence_members(", "index out of range \"\"\" if path is None: path = [] if", "rewrite_ref((BASE,)) '#' >>> rewrite_ref((\"remote\", \"foo\", \"bar\")) '#/remote/remote/foo/bar' >>> rewrite_ref((\"remote\",)) '#/remote/remote' \"\"\" base, *parts", "made by namespacing the reference under the remote base name inside the remote", "inside the base document.\"\"\" def __repr__(self): \"\"\"Readable representation for debugging. >>> repr(BaseRefPlaceholder()) '<BASE>'", "is added to the traversed path \"\"\" sequence_part = current_path_parts.pop(0) if sequence_part ==", "[{},{}] } target.pop(REF) except KeyError: pass elif key == \"required\": target[key] = sorted(set(target_schema)", "traversing the current document using the sequence index. The new document is appended", "the remote section. >>> rewrite_ref((BASE, \"foo\", \"bar\")) '#/foo/bar' >>> rewrite_ref((BASE,)) '#' >>> rewrite_ref((\"remote\",", "the new path part as the key. The next path part is added", "The new document is obtained from the current document (a sequence) using the", "instance representing a reference inside the base document. BASE = BaseRefPlaceholder() def rewrite_ref(ref):", "unpack sequence identifier (e.g. '*'), otherwise traverse index and continue: The new document", "lib # implicitly converts strings to sets target[TYPE] |= src_set except (TypeError, KeyError):", "invalid literal for int() with base 10: 'foo' >>> traverse_path_for_sequence_members([], [0]) Traceback (most", "in range(len(current_document)): new_paths.append(current_path.copy() + [sequence_index]) new_document = traverse_path_for_sequence_members( current_document, [sequence_index] + current_path_parts, current_path.copy()", "this document >>> traverse({\"foo\": {\"bar\": [42]}}, tuple()) ({'foo': {'bar': [42]}}, (), None) >>>", "a list of documents and a list of resolved paths. :parameter document: document", "parent def _resolve_ref(sub_schema: dict, definitions: dict, last_step: bool = False): # resolve $ref", "'#/foo/bar' >>> rewrite_ref((BASE,)) '#' >>> rewrite_ref((\"remote\", \"foo\", \"bar\")) '#/remote/remote/foo/bar' >>> rewrite_ref((\"remote\",)) '#/remote/remote' \"\"\"", "current_path_parts: list, current_path: list ) -> Tuple[List[object], List[tuple]]: \"\"\" Handling a non-sequence member", "is made by namespacing the reference under the remote base name inside the", "in `traverse`: The next path part is the first part in the list", "'a'}}, {'Foo': {'type': 'b'}} >>> schema_merge(a, b, ('foo',)) {'Foo': {'type': OrderedSet(['a', 'b'])}} >>>", "path. 
The traversal continues by recursively calling `traverse_path_for_sequence_members` \"\"\" part_to_handle = current_path_parts.pop(0) current_document", "= {'type': OrderedSet(['a', 'b'])}, {'$ref': 'c'} >>> schema_merge(a, b, ('foo',)) {'type': OrderedSet(['a', 'b',", "base is discarded. There is no validation that the reference is valid. :raises", "nested_lookup(REF, sub_schema) # should be safe (always single value) # bc sub_schema is", "list index out of range \"\"\" parent = None path = [] for", "\"type\" or \"$ref\" >>> traverse_raw_schema({\"properties\": {\"bar\": [42]}}, tuple()) {'bar': [42]} >>> traverse_raw_schema({\"properties\": {\"bar\":", "(\"foo\")) {} >>> traverse_raw_schema([], [\"foo\"]) Traceback (most recent call last): ... TypeError: Schema", "BASE = BaseRefPlaceholder() def rewrite_ref(ref): \"\"\"Rewrite a reference to be inside of the", "carry over existing properties except KeyError: target[key] = src_schema else: next_path = path", "noqa: C901 # pylint: disable=R0912 \"\"\"Merges the src schema into the target schema", ":parameter document: document to traverse (dict or list) :parameter path_parts: document paths to", "Any, List, Tuple from nested_lookup import nested_lookup from ordered_set import OrderedSet from .pointer", "(:const:`BASE`), the parts are simply encoded into a pointer. If the reference is", "src_set try: # check if there are conflicting $ref and type # at", "1), ('foo', 'bar', 2)]) >>> traverse_path_for_sequence_members({\"foo\": {\"bar\": [{\"baz\": 1, \"bin\": 1}, {\"baz\": 2,", "a, b = {'type': ['a', 'b']}, {'$ref': 'c'} >>> schema_merge(a, b, ('foo',)) {'type':", "[42, 43, 44]}], [('foo',)]) >>> traverse_path_for_sequence_members({\"foo\": {\"bar\": [42, 43, 44]}}, (\"foo\", \"bar\")) ([[42,", "to be the reference's base, the base is discarded. There is no validation", ">>> a, b = {'$ref': 'a'}, {'type': 'b'} >>> schema_merge(a, b, ('foo',)) {'type':", "raise TypeError(\"Both schemas must be dictionaries\") for key, src_schema in src.items(): try: if", "\"bar\")) '#/remote/remote/foo/bar' >>> rewrite_ref((\"remote\",)) '#/remote/remote' \"\"\" base, *parts = ref if base is", "_handle_unpack_sequence_for_traverse( current_document: Sequence, current_path_parts: list, current_path: list ) -> Tuple[List[object], List[tuple]]: \"\"\" When", "= nested_lookup(REF, sub_schema) # should be safe (always single value) # bc sub_schema", "{\"bar\": {\"$ref\": \"#/definitions/bar\"}}}, (\"bar\",)) {'type': 'boolean'} >>> traverse_raw_schema({\"definitions\":{\"b\":[1],\"f\":{\"properties\":{\"b\":{\"$ref\":\"#/definitions/b\"}}}},\"properties\":{\"f\":{\"$ref\":\"#/definitions/f\"}}},(\"f\", \"b\")) # noqa: B950 [1]", "directly -> # means that we need to check definition # otherwise it's", "and type # at the same sub schema. 
                        # happen on combiners because method merges two json
                        # objects without losing any previous info:
                        # e.g. "oneOf": [{"$ref": "..#1.."},{"$ref": "..#2.."}] ->
                        # { "ref": "..#1..", "type": [{},{}] }
                        target.pop(REF)
                    except KeyError:
                        pass
                elif key == "required":
                    target[key] = sorted(set(target_schema) | set(src_schema))
                else:
                    if key in NON_MERGABLE_KEYS and target_schema != src_schema:
                        msg = (
                            "Object at path '{path}' declared multiple values "
                            "for '{}': found '{}' and '{}'"
                        )
                        # pylint: disable=W0707
                        raise ConstraintError(msg, path, key, target_schema, src_schema)
                    target[key] = src_schema
    return target


def _resolve_ref(sub_schema: dict, definitions: dict, last_step: bool = False):
    # resolve $ref
    ref = nested_lookup(REF, sub_schema)
    # should be safe (always single value)
    # bc sub_schema is always per parent property
    # (taken from definitions)
    if last_step and REF not in sub_schema:
        # dont traverse deeper than requested
        # check if $ref is used directly ->
        # means that we need to check definition
        # otherwise it's an array and return subschema
        return sub_schema
    if ref:
        # [0] should be a single $ref in subschema on the top level
        # [-1] $ref must follow #/definitions/object
        sub_schema = definitions[fragment_decode(ref[0])[-1]]
    # resolve properties
    properties = nested_lookup("properties", sub_schema)
    if properties:
        sub_schema = properties[0]
    return sub_schema
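

# Illustrative usage sketch (not part of the original module): the helper below
# and its input schemas are hypothetical, added only to show how `schema_merge`
# combines overlapping keys; only `schema_merge` itself comes from this module.
def _example_schema_merge_usage():
    """Sketch of merging two overlapping sub-schemas in place.

    >>> target = {"type": "string", "required": ["a"]}
    >>> src = {"type": "integer", "required": ["b"]}
    >>> merged = schema_merge(target, src, ())
    >>> merged["required"]
    ['a', 'b']
    >>> list(merged["type"])
    ['string', 'integer']
    """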
# pylint: disable=C0301
def traverse_raw_schema(schema: dict, path: tuple):
    """Traverse the raw json schema resolving $ref

    :raises TypeError: either schema is not of type dict
    :raises ConstraintError: the schema tries to override "type" or "$ref"

    >>> traverse_raw_schema({"properties": {"bar": [42]}}, tuple())
    {'bar': [42]}
    >>> traverse_raw_schema({"properties": {"bar": [42]}}, ("bar",))
    [42]
    >>> traverse_raw_schema({"definitions": {"bar": {"type": "boolean"}},"properties": {"bar": {"$ref": "#/definitions/bar"}}}, ("bar",))
    {'type': 'boolean'}
    >>> traverse_raw_schema({"definitions":{"b":[1],"f":{"properties":{"b":{"$ref":"#/definitions/b"}}}},"properties":{"f":{"$ref":"#/definitions/f"}}},("f", "b")) # noqa: B950
    [1]
    >>> traverse_raw_schema({}, ("foo"))
    {}
    >>> traverse_raw_schema([], ["foo"])
    Traceback (most recent call last):
    ...
    TypeError: Schema must be a dictionary
    """
    if not isinstance(schema, Mapping):
        raise TypeError("Schema must be a dictionary")
    try:
        properties = schema["properties"]
        definitions = schema.get("definitions", {})
        sub_properties = properties
        last_step = (
            len(path) - 1
        )  # get amount of steps to prevent deeper traversal than requested
        for step in path:
            sub_properties = _resolve_ref(
                sub_properties[step],
                definitions,
                last_step=path.index(step) == last_step,
            )
        return sub_properties
    except KeyError as e:
        LOG.debug("Malformed Schema or incorrect path provided\n%s\n%s", path, e)
        return {}
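

# Illustrative usage sketch (not part of the original module): the helper and the
# "Tag" schema below are hypothetical, added to show a property that points at a
# definition being resolved through `traverse_raw_schema` and `_resolve_ref`.
def _example_traverse_raw_schema_usage():
    """Sketch of resolving a ``$ref``-backed property down to its sub-properties.

    >>> schema = {
    ...     "definitions": {"Tag": {"type": "object", "properties": {"Key": {"type": "string"}}}},
    ...     "properties": {"Tags": {"$ref": "#/definitions/Tag"}},
    ... }
    >>> traverse_raw_schema(schema, ("Tags",))
    {'Key': {'type': 'string'}}
    """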
class BaseRefPlaceholder:
    """A sentinel object representing a reference inside the base document."""

    def __repr__(self):
        """Readable representation for debugging.

        >>> repr(BaseRefPlaceholder())
        '<BASE>'
        """
        return "<BASE>"


#: The sentinel instance representing a reference inside the base document.
BASE = BaseRefPlaceholder()


def rewrite_ref(ref):
    """Rewrite a reference to be inside of the base document.

    A relative JSON pointer is returned (in URI fragment identifier
    representation).

    If the reference is already inside the base document (:const:`BASE`), the
    parts are simply encoded into a pointer.

    If the reference is outside of the base document, a unique pointer inside
    the base document is made by namespacing the reference under the remote
    base name inside the remote section.

    >>> rewrite_ref((BASE, "foo", "bar"))
    '#/foo/bar'
    >>> rewrite_ref((BASE,))
    '#'
    >>> rewrite_ref(("remote", "foo", "bar"))
    '#/remote/remote/foo/bar'
    >>> rewrite_ref(("remote",))
    '#/remote/remote'
    """
    base, *parts = ref
    if base is not BASE:
        parts = ["remote", base] + parts
    return fragment_encode(parts)
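

# Illustrative note (not from the original module): `BASE` is compared by
# identity, so any other first element in the reference tuple is treated as a
# remote document name. The helper and the names below are hypothetical examples.
def _example_rewrite_ref_usage():
    """Sketch contrasting base-document and remote references.

    >>> rewrite_ref((BASE, "properties", "Name"))
    '#/properties/Name'
    >>> rewrite_ref(("remote_schema", "properties", "Name"))
    '#/remote/remote_schema/properties/Name'
    """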
def traverse(document, path_parts):
    """Traverse the document according to the reference.

    Since the document is presumed to be the reference's base, the base is
    discarded. There is no validation that the reference is valid.

    :raises ValueError, LookupError: the reference is invalid for this document

    >>> traverse({"foo": {"bar": [42]}}, tuple())
    ({'foo': {'bar': [42]}}, (), None)
    >>> traverse({"foo": {"bar": [42]}}, ["foo"])
    ({'bar': [42]}, ('foo',), {'foo': {'bar': [42]}})
    >>> traverse({"foo": {"bar": [42]}}, ("foo", "bar"))
    ([42], ('foo', 'bar'), {'bar': [42]})
    >>> traverse({"foo": {"bar": [42]}}, ("foo", "bar", "0"))
    (42, ('foo', 'bar', 0), [42])
    >>> traverse({}, ["foo"])
    Traceback (most recent call last):
    ...
    KeyError: 'foo'
    >>> traverse([], ["foo"])
    Traceback (most recent call last):
    ...
    ValueError: invalid literal for int() with base 10: 'foo'
    >>> traverse([], [0])
    Traceback (most recent call last):
    ...
    IndexError: list index out of range
    """
    parent = None
    path = []
    for part in path_parts:
        if isinstance(document, Sequence):
            part = int(part)
        parent = document
        document = document[part]
        path.append(part)
    return document, tuple(path), parent
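

# Illustrative usage sketch (not part of the original module): the helper and the
# document below are hypothetical, added to show that `traverse` also returns the
# parent container, which makes in-place edits of the resolved value easy.
def _example_traverse_usage():
    """Sketch of using the parent container returned by ``traverse``.

    >>> doc = {"properties": {"Name": {"type": "string"}}}
    >>> value, path, parent = traverse(doc, ("properties", "Name"))
    >>> value
    {'type': 'string'}
    >>> path
    ('properties', 'Name')
    >>> parent is doc["properties"]
    True
    """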
def traverse_path_for_sequence_members(
    document: dict, path_parts: Sequence, path: list = None
) -> Tuple[List[object], List[tuple]]:
    """Traverse the paths for all sequence members in the document according to the reference.

    Since the document is presumed to be the reference's base, the base is
    discarded. There is no validation that the reference is valid.

    Differing from traverse, this returns a list of documents and a list of
    resolved paths.

    :parameter document: document to traverse (dict or list)
    :parameter path_parts: document paths to traverse
    :parameter path: traversed path so far

    :raises ValueError, LookupError: the reference is invalid for this document

    >>> traverse_path_for_sequence_members({"foo": {"bar": [42, 43, 44]}}, tuple())
    ([{'foo': {'bar': [42, 43, 44]}}], [()])
    >>> traverse_path_for_sequence_members({"foo": {"bar": [42, 43, 44]}}, ["foo"])
    ([{'bar': [42, 43, 44]}], [('foo',)])
    >>> traverse_path_for_sequence_members({"foo": {"bar": [42, 43, 44]}}, ("foo", "bar"))
    ([[42, 43, 44]], [('foo', 'bar')])
    >>> traverse_path_for_sequence_members({"foo": {"bar": [42, 43, 44]}}, ("foo", "bar", "*"))
    ([42, 43, 44], [('foo', 'bar', 0), ('foo', 'bar', 1), ('foo', 'bar', 2)])
    >>> traverse_path_for_sequence_members({"foo": {"bar": [{"baz": 1, "bin": 1}, {"baz": 2, "bin": 2}]}}, ("foo", "bar", "*"))
    ([{'baz': 1, 'bin': 1}, {'baz': 2, 'bin': 2}], [('foo', 'bar', 0), ('foo', 'bar', 1)])
    >>> traverse_path_for_sequence_members({"foo": {"bar": [{"baz": 1, "bin": 1}, {"baz": 2, "bin": 2}]}}, ("foo", "bar", "*", "baz"))
    ([1, 2], [('foo', 'bar', 0, 'baz'), ('foo', 'bar', 1, 'baz')])
    >>> traverse_path_for_sequence_members({}, ["foo"])
    Traceback (most recent call last):
    ...
    KeyError: 'foo'
    >>> traverse_path_for_sequence_members([], ["foo"])
    Traceback (most recent call last):
    ...
    ValueError: invalid literal for int() with base 10: 'foo'
    >>> traverse_path_for_sequence_members([], [0])
    Traceback (most recent call last):
    ...
    IndexError: list index out of range
    """
    if path is None:
        path = []
    if not path_parts:
        return [document], [tuple(path)]
    path_parts = list(path_parts)
    if not isinstance(document, Sequence):
        return _handle_non_sequence_for_traverse(document, path_parts, path)
    return _handle_sequence_for_traverse(document, path_parts, path)


def _handle_non_sequence_for_traverse(
    current_document: dict, current_path_parts: list, current_path: list
) -> Tuple[List[object], List[tuple]]:
    """
    Handling a non-sequence member for `traverse_path_for_sequence_members` is
    like the loop block in `traverse`:

    The next path part is the first part in the list of path parts.

    The new document is obtained from the current document using the new path
    part as the key.

    The next path part is added to the traversed path.

    The traversal continues by recursively calling `traverse_path_for_sequence_members`
    """
    part_to_handle = current_path_parts.pop(0)
    current_document = current_document[part_to_handle]
    current_path.append(part_to_handle)
    return traverse_path_for_sequence_members(
        current_document, current_path_parts, current_path
    )
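

# Illustrative usage sketch (not part of the original module): the helper and the
# document below are hypothetical, added to show the fan-out behaviour of the
# "*" unpack identifier handled by the sequence helpers that follow.
def _example_unpack_sequence_usage():
    """Sketch of fanning out over every member of a list.

    >>> doc = {"Tags": [{"Key": "env"}, {"Key": "team"}]}
    >>> traverse_path_for_sequence_members(doc, ("Tags", "*", "Key"))
    (['env', 'team'], [('Tags', 0, 'Key'), ('Tags', 1, 'Key')])
    """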
def _handle_sequence_for_traverse(
    current_document: Sequence, current_path_parts: list, current_path: list
) -> Tuple[List[object], List[tuple]]:
    """
    Check the new path part for the unpack sequence identifier (e.g. '*'),
    otherwise traverse index and continue:

    The new document is obtained from the current document (a sequence) using
    the new path part as the index.

    The next path part is added to the traversed path
    """
    sequence_part = current_path_parts.pop(0)
    if sequence_part == UNPACK_SEQUENCE_IDENTIFIER:
        return _handle_unpack_sequence_for_traverse(
            current_document, current_path_parts, current_path
        )
    # otherwise, sequence part should be a valid index
    current_sequence_part = int(sequence_part)
    current_document = current_document[current_sequence_part]
    current_path.append(current_sequence_part)
    return [current_document], [tuple(current_path)]


def _handle_unpack_sequence_for_traverse(
    current_document: Sequence, current_path_parts: list, current_path: list
) -> Tuple[List[object], List[tuple]]:
    """
    When unpacking a sequence, we need to include multiple paths and multiple
    documents, one for each sequence member.

    For each sequence member:
    Append the traversed paths w/ the sequence index, and get the new document.
    The new document is obtained by traversing the current document using the
    sequence index.
    The new document is appended to the list of new documents.

    For each new document:
    The remaining document is traversed using the remaining path parts.

    The list of traversed documents and traversed paths are returned.
    """
    documents = []
    resolved_paths = []
    new_documents = []
    new_paths = []
    for sequence_index in range(len(current_document)):
        new_paths.append(current_path.copy() + [sequence_index])
        new_document = traverse_path_for_sequence_members(
            current_document, [sequence_index] + current_path_parts, current_path.copy()
        )[0]
        new_documents.extend(new_document)
    for i in range(len(new_documents)):  # pylint: disable=consider-using-enumerate
        new_document = new_documents[i]
        newer_documents, newer_paths = traverse_path_for_sequence_members(
            new_document, current_path_parts, new_paths[i]
        )
        documents.extend(newer_documents)
        resolved_paths.extend(newer_paths)
    return documents, resolved_paths
There is", "to traverse :parameter path: traversed path so far :raises ValueError, LookupError: the reference", "traverse deeper than requested # check if $ref is used directly -> #", "must follow #/definitions/object sub_schema = definitions[fragment_decode(ref[0])[-1]] # resolve properties properties = nested_lookup(\"properties\", sub_schema)", "override \"type\" or \"$ref\" >>> traverse_raw_schema({\"properties\": {\"bar\": [42]}}, tuple()) {'bar': [42]} >>> traverse_raw_schema({\"properties\":", "a sequence, we need to include multiple paths and multiple documents, one for", "properties last_step = ( len(path) - 1 ) # get amount of steps", "(list, OrderedSet)) else OrderedSet([value]) ) class ConstraintError(FlatteningError, ValueError): def __init__(self, message, path, *args):", ">>> a, b = {'type': ['a', 'b']}, {'$ref': 'c'} >>> schema_merge(a, b, ('foo',))", "Sequence, path: list = None ) -> Tuple[List[object], List[tuple]]: \"\"\"Traverse the paths for", "safe (always single value) # bc sub_schema is always per paranet property #", "documents = [] resolved_paths = [] new_documents = [] new_paths = [] for", "if isinstance(item, dict): item = {k: item_hash(v) for k, v in item.items()} if", "43, 44]}}, (\"foo\", \"bar\")) ([[42, 43, 44]], [('foo', 'bar')]) >>> traverse_path_for_sequence_members({\"foo\": {\"bar\": [42,", "else: target_schema = target[key] # carry over existing properties except KeyError: target[key] =", "we need to include multiple paths and multiple documents, one for each sequence", "traverse :parameter path: traversed path so far :raises ValueError, LookupError: the reference is", "\"\"\"Traverse the document according to the reference. Since the document is presumed to", "last_step=path.index(step) == last_step, ) return sub_properties except KeyError as e: LOG.debug(\"Malformed Schema or", "path \"\"\" sequence_part = current_path_parts.pop(0) if sequence_part == UNPACK_SEQUENCE_IDENTIFIER: return _handle_unpack_sequence_for_traverse( current_document, current_path_parts,", "parts = [\"remote\", base] + parts return fragment_encode(parts) def traverse(document, path_parts): \"\"\"Traverse the", "ValueError, LookupError: the reference is invalid for this document >>> traverse_path_for_sequence_members({\"foo\": {\"bar\": [42,", "schema_merge({}, {}, ()) {} >>> schema_merge({'foo': 'a'}, {}, ()) {'foo': 'a'} >>> schema_merge({},", "list of new documents. For each new document: The remaining document is traversed", "= logging.getLogger(__name__) NON_MERGABLE_KEYS = (\"uniqueItems\", \"insertionOrder\") TYPE = \"type\" REF = \"$ref\" UNPACK_SEQUENCE_IDENTIFIER", "1}, {\"baz\": 2, \"bin\": 2}]}}, (\"foo\", \"bar\", \"*\")) ([{'baz': 1, 'bin': 1}, {'baz':", "document.\"\"\" def __repr__(self): \"\"\"Readable representation for debugging. 
>>> repr(BaseRefPlaceholder()) '<BASE>' \"\"\" return \"<BASE>\"", "{'type': OrderedSet(['a', 'b', 'c'])} >>> a, b = {'Foo': {'$ref': 'a'}}, {'Foo': {'type':", "# nosec if isinstance(item, dict): item = {k: item_hash(v) for k, v in", "be a dictionary \"\"\" if not isinstance(schema, Mapping): raise TypeError(\"Schema must be a", "traverse({\"foo\": {\"bar\": [42]}}, tuple()) ({'foo': {'bar': [42]}}, (), None) >>> traverse({\"foo\": {\"bar\": [42]}},", "base] + parts return fragment_encode(parts) def traverse(document, path_parts): \"\"\"Traverse the document according to", "[\"foo\"]) ([{'bar': [42, 43, 44]}], [('foo',)]) >>> traverse_path_for_sequence_members({\"foo\": {\"bar\": [42, 43, 44]}}, (\"foo\",", "schema is not of type dict :raises ConstraintError: the schema tries to override", "sub_properties = _resolve_ref( sub_properties[step], definitions, last_step=path.index(step) == last_step, ) return sub_properties except KeyError", ">>> traverse_path_for_sequence_members({\"foo\": {\"bar\": [{\"baz\": 1, \"bin\": 1}, {\"baz\": 2, \"bin\": 2}]}}, (\"foo\", \"bar\",", "{'type': OrderedSet(['a', 'b'])} >>> schema_merge({'type': 'string'}, {'type': 'integer'}, ()) {'type': OrderedSet(['string', 'integer'])} \"\"\"", "tries to override \"type\" or \"$ref\" >>> traverse_raw_schema({\"properties\": {\"bar\": [42]}}, tuple()) {'bar': [42]}", "fragment_encode LOG = logging.getLogger(__name__) NON_MERGABLE_KEYS = (\"uniqueItems\", \"insertionOrder\") TYPE = \"type\" REF =", "# pylint: disable=consider-using-enumerate new_document = new_documents[i] newer_documents, newer_paths = traverse_path_for_sequence_members( new_document, current_path_parts, new_paths[i]", "the base document. A relative JSON pointer is returned (in URI fragment identifier", "b, ('foo',)) {'type': OrderedSet(['a', 'b', 'c'])} >>> a, b = {'Foo': {'$ref': 'a'}},", "def __init__(self, message, path, *args): self.path = fragment_encode(path) message = message.format(*args, path=self.path) super().__init__(message)", "reference. Since the document is presumed to be the reference's base, the base", "path_parts: Sequence, path: list = None ) -> Tuple[List[object], List[tuple]]: \"\"\"Traverse the paths", "path + (key,) try: target[key] = schema_merge(target_schema, src_schema, next_path) except TypeError: if key", "last): ... KeyError: 'foo' >>> traverse_path_for_sequence_members([], [\"foo\"]) Traceback (most recent call last): ...", "traverse_path_for_sequence_members({\"foo\": {\"bar\": [{\"baz\": 1, \"bin\": 1}, {\"baz\": 2, \"bin\": 2}]}}, (\"foo\", \"bar\", \"*\",", "\"\"\" part_to_handle = current_path_parts.pop(0) current_document = current_document[part_to_handle] current_path.append(part_to_handle) return traverse_path_for_sequence_members( current_document, current_path_parts, current_path", "otherwise traverse index and continue: The new document is obtained from the current", "[42, 43, 44]}}, [\"foo\"]) ([{'bar': [42, 43, 44]}], [('foo',)]) >>> traverse_path_for_sequence_members({\"foo\": {\"bar\": [42,", "dict :raises ConstraintError: the schema tries to override \"type\" or \"$ref\" >>> traverse_raw_schema({\"properties\":", "document (a sequence) using the new path part as the index. 
The next", "\"\"\" if not (isinstance(target, Mapping) and isinstance(src, Mapping)): raise TypeError(\"Both schemas must be", "} target.pop(REF) except KeyError: pass elif key == \"required\": target[key] = sorted(set(target_schema) |", "= to_set(target_schema) target[TYPE] = target_set | src_set try: # check if there are", "part should be a valid index current_sequence_part = int(sequence_part) current_document = current_document[current_sequence_part] current_path.append(current_sequence_part)", "new path part for the unpack sequence identifier (e.g. '*'), otherwise traverse index", "or list) :parameter path_parts: document paths to traverse :parameter path: traversed path so", "recursively calling `traverse_path_for_sequence_members` \"\"\" part_to_handle = current_path_parts.pop(0) current_document = current_document[part_to_handle] current_path.append(part_to_handle) return traverse_path_for_sequence_members(", "{'type': ['b', 'c']} >>> schema_merge(a, b, ('foo',)) {'type': OrderedSet(['a', 'b', 'c'])} >>> a,", "... KeyError: 'foo' >>> traverse([], [\"foo\"]) Traceback (most recent call last): ... ValueError:", "definitions) if last_step and REF not in sub_schema: # dont traverse deeper than", "= _resolve_ref( sub_properties[step], definitions, last_step=path.index(step) == last_step, ) return sub_properties except KeyError as", ">>> schema_merge(a, b, ('foo',)) {'type': OrderedSet(['a', 'b', 'c'])} >>> a, b = {'$ref':", "provided\\n%s\\n%s\", path, e) return {} def traverse_path_for_sequence_members( document: dict, path_parts: Sequence, path: list", "()) {'foo': 'a'} >>> schema_merge({}, {'foo': 'a'}, ()) {'foo': 'a'} >>> schema_merge({'foo': 'a'},", "traverse (dict or list) :parameter path_parts: document paths to traverse :parameter path: traversed", "list, current_path: list ) -> Tuple[List[object], List[tuple]]: \"\"\" Handling a non-sequence member for", "a, b = {'$ref': 'a'}, {'type': OrderedSet(['b', 'c'])} >>> schema_merge(a, b, ('foo',)) {'type':", "current document using the sequence index. The new document is appended to the", "= int(part) parent = document document = document[part] path.append(part) return document, tuple(path), parent", "not isinstance(schema, Mapping): raise TypeError(\"Schema must be a dictionary\") try: properties = schema[\"properties\"]", "call last): ... IndexError: list index out of range \"\"\" if path is", "in subschema on the top level # [-1] $ref must follow #/definitions/object sub_schema", "invalid literal for int() with base 10: 'foo' >>> traverse([], [0]) Traceback (most", "debugging. >>> repr(BaseRefPlaceholder()) '<BASE>' \"\"\" return \"<BASE>\" #: The sentinel instance representing a", "and traversed paths are returned. \"\"\" documents = [] resolved_paths = [] new_documents", "# resolve $ref ref = nested_lookup(REF, sub_schema) # should be safe (always single", "current_path: list ) -> Tuple[List[object], List[tuple]]: \"\"\" Check the new path part for", "\"insertionOrder\") TYPE = \"type\" REF = \"$ref\" UNPACK_SEQUENCE_IDENTIFIER = \"*\" class FlatteningError(Exception): pass", "= schema[\"properties\"] definitions = schema.get(\"definitions\", {}) sub_properties = properties last_step = ( len(path)", "\"\"\" When unpacking a sequence, we need to include multiple paths and multiple", "typing import Any, List, Tuple from nested_lookup import nested_lookup from ordered_set import OrderedSet", "of the base document. 
A relative JSON pointer is returned (in URI fragment", "('foo', 'bar', 1)]) >>> traverse_path_for_sequence_members({\"foo\": {\"bar\": [{\"baz\": 1, \"bin\": 1}, {\"baz\": 2, \"bin\":", "(most recent call last): ... IndexError: list index out of range \"\"\" if", ":raises TypeError: either schema is not of type dict :raises ConstraintError: the schema", "list index out of range \"\"\" if path is None: path = []", "in src.items(): try: if key in ( REF, TYPE, ): # $ref and", "nested_lookup(\"properties\", sub_schema) if properties: sub_schema = properties[0] return sub_schema # pylint: disable=C0301 def", "in (TYPE, REF): # combining multiple $ref and types src_set = to_set(src_schema) try:", "... IndexError: list index out of range \"\"\" parent = None path =", "|= src_set except (TypeError, KeyError): target_set = to_set(target_schema) target[TYPE] = target_set | src_set", "schema_merge(a, b, ('foo',)) {'type': OrderedSet(['a', 'b', 'c'])} >>> a, b = {'$ref': 'a'},", "path provided\\n%s\\n%s\", path, e) return {} def traverse_path_for_sequence_members( document: dict, path_parts: Sequence, path:", "base 10: 'foo' >>> traverse_path_for_sequence_members([], [0]) Traceback (most recent call last): ... IndexError:", "$ref :raises TypeError: either schema is not of type dict :raises ConstraintError: the", ">>> schema_merge(a, b, ('foo',)) {'type': OrderedSet(['a', 'b', 'c'])} >>> a, b = {'Foo':", "= to_set( target[TYPE] ) # casting to ordered set as lib # implicitly", "documents.extend(newer_documents) resolved_paths.extend(newer_paths) return documents, resolved_paths def schema_merge(target, src, path): # noqa: C901 #", "in path: sub_properties = _resolve_ref( sub_properties[step], definitions, last_step=path.index(step) == last_step, ) return sub_properties", "check if there are conflicting $ref and type # at the same sub", "call last): ... TypeError: Schema must be a dictionary \"\"\" if not isinstance(schema,", "only # happen on combiners because method merges two json # objects without", "# resolve properties properties = nested_lookup(\"properties\", sub_schema) if properties: sub_schema = properties[0] return", "int(sequence_part) current_document = current_document[current_sequence_part] current_path.append(current_sequence_part) return [current_document], [tuple(current_path)] def _handle_unpack_sequence_for_traverse( current_document: Sequence, current_path_parts:", "\"\"\" base, *parts = ref if base is not BASE: parts = [\"remote\",", "should be a valid index current_sequence_part = int(sequence_part) current_document = current_document[current_sequence_part] current_path.append(current_sequence_part) return", "( \"Object at path '{path}' declared multiple values \" \"for '{}': found '{}'", "TYPE, ): # $ref and type are treated similarly and unified target_schema =", "except KeyError: pass elif key == \"required\": target[key] = sorted(set(target_schema) | set(src_schema)) else:", "ConstraintError(FlatteningError, ValueError): def __init__(self, message, path, *args): self.path = fragment_encode(path) message = message.format(*args,", "([[42, 43, 44]], [('foo', 'bar')]) >>> traverse_path_for_sequence_members({\"foo\": {\"bar\": [42, 43, 44]}}, (\"foo\", \"bar\",", "new_documents[i] newer_documents, newer_paths = traverse_path_for_sequence_members( new_document, current_path_parts, new_paths[i] ) documents.extend(newer_documents) resolved_paths.extend(newer_paths) return documents,", "place. If there are duplicate keys, src will overwrite target. 
:raises TypeError: either", "\"type\" or \"$ref\" >>> schema_merge({}, {}, ()) {} >>> schema_merge({'foo': 'a'}, {}, ())", "# pylint: disable=R0912 \"\"\"Merges the src schema into the target schema in place.", "otherwise, sequence part should be a valid index current_sequence_part = int(sequence_part) current_document =", "KeyError: pass elif key == \"required\": target[key] = sorted(set(target_schema) | set(src_schema)) else: if" ]
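The overlapping fragments in the row above come from a JSON-schema flattening module (functions such as traverse, traverse_path_for_sequence_members, rewrite_ref and schema_merge, plus FlatteningError/ConstraintError). To make the two main ideas they describe concrete, here is a minimal, self-contained sketch: a path traversal in which a "*" part unpacks every member of a sequence and returns both the matched documents and their resolved paths, and a simplified merge that unions the "type" and "required" keywords of two schemas. The names traverse_members, merge_types and UNPACK are illustrative only, not the module's actual API; $ref resolution, conflict errors and the other keywords the fragments mention are deliberately left out.

# Minimal sketch, assuming simple dict/list documents and tuple path parts.
UNPACK = "*"  # hypothetical wildcard, mirroring the UNPACK_SEQUENCE_IDENTIFIER idea

def traverse_members(document, path_parts, path=()):
    """Return ([documents], [resolved paths]) for every member matched by the path."""
    if not path_parts:
        return [document], [tuple(path)]
    part, rest = path_parts[0], path_parts[1:]
    if isinstance(document, list):
        if part == UNPACK:
            docs, paths = [], []
            for index, member in enumerate(document):
                found_docs, found_paths = traverse_members(member, rest, path + (index,))
                docs.extend(found_docs)
                paths.extend(found_paths)
            return docs, paths
        index = int(part)  # otherwise the part must be a valid sequence index
        return traverse_members(document[index], rest, path + (index,))
    return traverse_members(document[part], rest, path + (part,))  # mapping lookup

def merge_types(target, src):
    """Union-merge the 'type' and 'required' keywords of two simple schemas."""
    merged = dict(target)
    for key, value in src.items():
        if key == "type":
            existing = merged.get("type", [])
            existing = existing if isinstance(existing, list) else [existing]
            incoming = value if isinstance(value, list) else [value]
            merged["type"] = sorted(set(existing) | set(incoming))
        elif key == "required":
            merged["required"] = sorted(set(merged.get("required", [])) | set(value))
        else:
            merged[key] = value  # later keys simply overwrite, as in a naive merge
    return merged

if __name__ == "__main__":
    doc = {"foo": {"bar": [{"baz": 1}, {"baz": 2}]}}
    print(traverse_members(doc, ("foo", "bar", "*", "baz")))
    # ([1, 2], [('foo', 'bar', 0, 'baz'), ('foo', 'bar', 1, 'baz')])
    print(merge_types({"type": "string", "required": ["a"]},
                      {"type": "integer", "required": ["b"]}))
    # {'type': ['integer', 'string'], 'required': ['a', 'b']}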
[ "describe_cli(): def describe_signout(): def it_can_force_signin(runner): result = runner.invoke(main, [\"Foobar\", \"--signout\"]) expect(result.exit_code) == 0", "main @pytest.fixture def runner(): return CliRunner() def describe_cli(): def describe_signout(): def it_can_force_signin(runner): result", "click.testing import CliRunner from expecter import expect from slackoff.cli import main @pytest.fixture def", "CliRunner from expecter import expect from slackoff.cli import main @pytest.fixture def runner(): return", "runner.invoke(main, [\"Foobar\", \"--signout\"]) expect(result.exit_code) == 0 expect(result.output) == \"Currently signed out of Foobar\\n\"", "import main @pytest.fixture def runner(): return CliRunner() def describe_cli(): def describe_signout(): def it_can_force_signin(runner):", "CliRunner() def describe_cli(): def describe_signout(): def it_can_force_signin(runner): result = runner.invoke(main, [\"Foobar\", \"--signout\"]) expect(result.exit_code)", "import CliRunner from expecter import expect from slackoff.cli import main @pytest.fixture def runner():", "pytest from click.testing import CliRunner from expecter import expect from slackoff.cli import main", "pylint: disable=redefined-outer-name,unused-variable,expression-not-assigned import pytest from click.testing import CliRunner from expecter import expect from", "expect from slackoff.cli import main @pytest.fixture def runner(): return CliRunner() def describe_cli(): def", "def describe_cli(): def describe_signout(): def it_can_force_signin(runner): result = runner.invoke(main, [\"Foobar\", \"--signout\"]) expect(result.exit_code) ==", "# pylint: disable=redefined-outer-name,unused-variable,expression-not-assigned import pytest from click.testing import CliRunner from expecter import expect", "disable=redefined-outer-name,unused-variable,expression-not-assigned import pytest from click.testing import CliRunner from expecter import expect from slackoff.cli", "slackoff.cli import main @pytest.fixture def runner(): return CliRunner() def describe_cli(): def describe_signout(): def", "runner(): return CliRunner() def describe_cli(): def describe_signout(): def it_can_force_signin(runner): result = runner.invoke(main, [\"Foobar\",", "return CliRunner() def describe_cli(): def describe_signout(): def it_can_force_signin(runner): result = runner.invoke(main, [\"Foobar\", \"--signout\"])", "from slackoff.cli import main @pytest.fixture def runner(): return CliRunner() def describe_cli(): def describe_signout():", "def it_can_force_signin(runner): result = runner.invoke(main, [\"Foobar\", \"--signout\"]) expect(result.exit_code) == 0 expect(result.output) == \"Currently", "= runner.invoke(main, [\"Foobar\", \"--signout\"]) expect(result.exit_code) == 0 expect(result.output) == \"Currently signed out of", "import pytest from click.testing import CliRunner from expecter import expect from slackoff.cli import", "expecter import expect from slackoff.cli import main @pytest.fixture def runner(): return CliRunner() def", "result = runner.invoke(main, [\"Foobar\", \"--signout\"]) expect(result.exit_code) == 0 expect(result.output) == \"Currently signed out", "from expecter import expect from slackoff.cli import main @pytest.fixture def runner(): return CliRunner()", "import expect from slackoff.cli import main @pytest.fixture def runner(): return CliRunner() def describe_cli():", "describe_signout(): def it_can_force_signin(runner): result = runner.invoke(main, [\"Foobar\", \"--signout\"]) expect(result.exit_code) == 0 
expect(result.output) ==", "@pytest.fixture def runner(): return CliRunner() def describe_cli(): def describe_signout(): def it_can_force_signin(runner): result =", "def describe_signout(): def it_can_force_signin(runner): result = runner.invoke(main, [\"Foobar\", \"--signout\"]) expect(result.exit_code) == 0 expect(result.output)", "def runner(): return CliRunner() def describe_cli(): def describe_signout(): def it_can_force_signin(runner): result = runner.invoke(main,", "it_can_force_signin(runner): result = runner.invoke(main, [\"Foobar\", \"--signout\"]) expect(result.exit_code) == 0 expect(result.output) == \"Currently signed", "from click.testing import CliRunner from expecter import expect from slackoff.cli import main @pytest.fixture" ]
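The fragments in the row above are overlapping windows over a small pytest-describe style test module for the slackoff CLI, built on click's CliRunner and the expecter assertion helper. Joined end to end they appear to read roughly as follows; this reassembly is a best-effort reading of the windows shown here for legibility, not a copy of the upstream file.

# pylint: disable=redefined-outer-name,unused-variable,expression-not-assigned
import pytest
from click.testing import CliRunner
from expecter import expect

from slackoff.cli import main

@pytest.fixture
def runner():
    return CliRunner()

def describe_cli():
    def describe_signout():
        def it_can_force_signin(runner):
            result = runner.invoke(main, ["Foobar", "--signout"])
            expect(result.exit_code) == 0
            expect(result.output) == "Currently signed out of Foobar\n"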
[ "session.query(Server).filter(Server.id==server_id).one_or_none() # If hen isn't, GTFO if u is None: return # Do", "def load_config(configfile): \"\"\"Return a dict with configuration from the supplied yaml file.\"\"\" try:", "from client.send_message(message.channel, \"Du är nu borttagen\") else: print('[on_message] unregister - server dont exist.", "yield from client.send_message(message.channel, fikare_mentions + \"FIKA!!\") #Skrik. ( session.query(Trigger) .filter(Trigger.timestamp < int(time.time()-30*60)) .delete()", "# return # BEGIN Anti-spam section # Because people are generally assholes and", "time import yaml import os import sys from sqlalchemy import func, create_engine from", "elif client.user in message.mentions and 'register' in message.content: print('[on_message] Adding client ID:{0} on", "att pinga folk på fikalistan när någon pingar mig med \"fika\". `@fikabotten register`", "import sessionmaker from db import Base,User,Server,Trigger def load_config(configfile): \"\"\"Return a dict with configuration", "asyncio.sleep(3) yield from client.send_typing(message.channel) yield from asyncio.sleep(1) yield from client.send_message(message.channel, message.author.mention + '", "with configuration from the supplied yaml file.\"\"\" try: with open(configfile, 'r') as ymlfile:", "# Lägg till mentions till en lång sträng. yield from client.send_message(message.channel, fikare_mentions +", "rate_limit_bail = True yield from client.send_message(message.channel, message.author.mention + ' Förhelvete...!') if rate_1m ==", "fikalistan. \"\"\" ) elif client.user in message.mentions and 'unregister' in message.content: print('[on_message] Removing", "client.send_message(message.channel, \"Du är nu borttagen\") else: print('[on_message] unregister - server dont exist. waaat')", "engine = create_engine(config.get('database'), echo=True) Session = sessionmaker(bind=engine) session = Session() Base.metadata.create_all(engine) # Create", "rate_5m == 4: print('RTL-2 - verbose') rate_limit_bail = True yield from client.send_message(message.channel, message.author.mention", "None: u = User(id=user_id) session.add(u) print('Added user to database') s = session.query(Server).filter(Server.id==server_id).one_or_none() if", "= message.server.id print('UserID: {0}'.format(message.author.id)) if client.user in message.mentions and 'help' in message.content: yield", "client.send_message(message.channel, fikare_mentions + \"FIKA!!\") #Skrik. ( session.query(Trigger) .filter(Trigger.timestamp < int(time.time()-30*60)) .delete() ) elif", "# rate_1m = ( session.query(func.count('*')) .select_from(Trigger) .filter(Trigger.user_id==user_id) .filter(Trigger.server_id==server_id) .filter(Trigger.timestamp > time.time()-(60)) # 60s", ".select_from(Trigger) .filter(Trigger.user_id==user_id) .filter(Trigger.server_id==server_id) .filter(Trigger.timestamp > time.time()-(60)) # 60s .scalar() ) rate_g_5m = (", "s.users # En list med alla användare relaterade till servern. fikare_mentions = \"\"", "the supplied yaml file.\"\"\" try: with open(configfile, 'r') as ymlfile: try: config =", "server_id)) u = session.query(User).filter(User.id==user_id).one_or_none() if u is None: u = User(id=user_id) session.add(u) print('Added", "from client.send_message(message.channel, \"\"\" Jag kommer att pinga folk på fikalistan när någon pingar", "' pingad när det är fika på G.') else: print('But, your already registered", "# Get server and user objects as u and s. 
u = session.query(User).filter(User.id==user_id).one_or_none()", "from sqlalchemy import func, create_engine from sqlalchemy.orm import sessionmaker from db import Base,User,Server,Trigger", "None: s.users.remove(u) session.commit() yield from client.send_message(message.channel, \"Du är nu borttagen\") else: print('[on_message] unregister", "redan tillagd ') yield from asyncio.sleep(3) yield from client.send_typing(message.channel) yield from asyncio.sleep(1) yield", "section # Because people are generally assholes and will probably attempt to misuse", "generally assholes and will probably attempt to misuse the bot. # rate_1m =", "print(json.dumps(message.raw_mentions)) print('Recived message') user_id = message.author.id server_id = message.server.id print('UserID: {0}'.format(message.author.id)) if client.user", "timestamp=int(time.time()) )) # Gotta commit those changes to the DB. session.commit() if rate_limit_bail:", "h == 11 or h == 12: # yield from client.send_message(message.channel, message.author.mention +", "Okej, ready for action. Vi har serverobjektet. if s is not None: fikare_db", "# .filter(Trigger.time > ()time.time()-(30*60)) # 5*60 s # .scalar() #) rate_limit_bail = False", "print('But, your already registered in this server :o') yield from client.send_message(message.channel, 'Du är", "s is not None: fikare_db = s.users # En list med alla användare", "fikalistan. `@fikabotten unregister` - bli borttagen ifrån fikalistan. `@fikabotten fika` - Trigga meddelandet", "If hen isn't, GTFO if u is None: return # Do a check", ".filter(Trigger.server_id==server_id) # .filter(Trigger.time > ()time.time()-(30*60)) # 5*60 s # .scalar() #) rate_limit_bail =", "print('RTL-1 - silent') rate_limit_bail = True # RTL-2 if rate_5m == 4: print('RTL-2", "s = Server(id=server_id) session.add(s) print('Added server to database') if not s in u.servers:", "== 12: # yield from client.send_message(message.channel, message.author.mention + ' Fika? Det är ju", "in message.mentions and len(re.findall('fika', message.content,flags=re.I)) > 0): print('[on_message] TIME TO GET SOME FIKA')", "if s is None: s = Server(id=server_id) session.add(s) print('Added server to database') if", "dont exist. waaat') else: print('[on_message] unregister - user dont exist') elif client.user in", "len(re.findall('fika', message.content,flags=re.I)) > 0): print('[on_message] TIME TO GET SOME FIKA') h = time.localtime().tm_hour", "client.send_message(message.channel, 'Du kommer att bli' + ' pingad när det är fika på", "En list med alla användare relaterade till servern. fikare_mentions = \"\" for fikare", "discord import asyncio import re import time import yaml import os import sys", "print('Logged in as') print(client.user.name) print(client.user.id) print('------') @client.event @asyncio.coroutine def on_message(message): # Stop handling", "exist. waaat') else: print('[on_message] unregister - user dont exist') elif client.user in message.mentions", "client.send_message(message.channel, message.author.mention + ' :middle_finger:') return elif h > 18: yield from client.send_message(message.channel,", "if u is None: return # Do a check for odd hours of", "and 'register' in message.content: print('[on_message] Adding client ID:{0} on ID:{1}'.format(user_id, server_id)) u =", "try: with open(configfile, 'r') as ymlfile: try: config = yaml.load(ymlfile) except yaml.parser.ParserError: print('Could", "server and user objects as u and s. 
u = session.query(User).filter(User.id==user_id).one_or_none() s =", "18: yield from client.send_message(message.channel, message.author.mention + ' Lite sent för fika nu....') return", "list med alla användare relaterade till servern. fikare_mentions = \"\" for fikare in", "to the DB. session.commit() if rate_limit_bail: return # END Anti-spam section # #", "create_engine(config.get('database'), echo=True) Session = sessionmaker(bind=engine) session = Session() Base.metadata.create_all(engine) # Create client client", "sent för fika nu....') return #elif h == 10 or h == 11", "the day. if h < 8 or h > 23: yield from client.send_message(message.channel,", "message.mentions and 'help' in message.content: yield from client.send_typing(message.channel) yield from asyncio.sleep(4) yield from", "if not s in u.servers: u.servers.append(s) session.commit() print('Added client to server') yield from", "print('------') @client.event @asyncio.coroutine def on_message(message): # Stop handling messages from this bot immediately.", "messages from this bot immediately. if message.author == client.user: return import json print(json.dumps(message.raw_mentions))", "@asyncio.coroutine def on_ready(): print('Logged in as') print(client.user.name) print(client.user.id) print('------') @client.event @asyncio.coroutine def on_message(message):", "'config.yaml' if os.getenv('FIKABOTTEN_CONFIG'): configfile = os.getenv('FIKABOTTEN_CONFIG') config = load_config(configfile) # Setup database engine", "yield from client.send_typing(message.channel) yield from asyncio.sleep(4) yield from client.send_message(message.channel, \"\"\" Jag kommer att", "and user objects as u and s. u = session.query(User).filter(User.id==user_id).one_or_none() s = session.query(Server).filter(Server.id==server_id).one_or_none()", "def on_message(message): # Stop handling messages from this bot immediately. if message.author ==", "are generally assholes and will probably attempt to misuse the bot. 
# rate_1m", "echo=True) Session = sessionmaker(bind=engine) session = Session() Base.metadata.create_all(engine) # Create client client =", "message.mentions and len(re.findall('fika', message.content,flags=re.I)) > 0): print('[on_message] TIME TO GET SOME FIKA') h", "#) rate_limit_bail = False # RTL-1 if rate_g_5m >= 1: print('RTL-1 - silent')", "= True # RTL-2 if rate_5m == 4: print('RTL-2 - verbose') rate_limit_bail =", ".filter(Trigger.server_id==server_id) .filter(Trigger.timestamp > time.time()-(5*60)) # 5*60 s .scalar() ) #rate_30m = ( #", "FIKA') h = time.localtime().tm_hour # Get server and user objects as u and", ") elif len(re.findall('fika', message.content, flags=re.I)) > 0: print('[on_message] DEBUG: fika matched, but no", "session.delete(u) session.commit() elif client.user in message.mentions and 'register' in message.content: print('[on_message] Adding client", "yaml file.\"\"\" try: with open(configfile, 'r') as ymlfile: try: config = yaml.load(ymlfile) except", "from ID:{1}'.format(user_id, server_id)) u = session.query(User).filter(User.id==user_id).one_or_none() if u is not None: s =", "to server') yield from client.send_message(message.channel, 'Du kommer att bli' + ' pingad när", "TO GET SOME FIKA') h = time.localtime().tm_hour # Get server and user objects", "time.time()-(5*60)) # 5*60 s .scalar() ) #rate_30m = ( # session.query(func.count('*')) # .select_from(Trigger)", "client.send_typing(message.channel) yield from asyncio.sleep(4) yield from client.send_message(message.channel, \"\"\" Jag kommer att pinga folk", "int(time.time()-30*60)) .delete() ) elif len(re.findall('fika', message.content, flags=re.I)) > 0: print('[on_message] DEBUG: fika matched,", "time.time()-(5*60)) # 5*60s .scalar() ) rate_5m = ( session.query(func.count('*')) .select_from(Trigger) .filter(Trigger.user_id==user_id) .filter(Trigger.server_id==server_id) .filter(Trigger.timestamp", "lunch...') # return # BEGIN Anti-spam section # Because people are generally assholes", "yield from client.send_message(message.channel, \"\"\" Jag kommer att pinga folk på fikalistan när någon", "if s is not None: s.users.remove(u) session.commit() yield from client.send_message(message.channel, \"Du är nu", "+ ' Fika? Det är ju lunch...') # return # BEGIN Anti-spam section", "else: print('But, your already registered in this server :o') yield from client.send_message(message.channel, 'Du", "alla på fikalistan. \"\"\" ) elif client.user in message.mentions and 'unregister' in message.content:", "= load_config(configfile) # Setup database engine = create_engine(config.get('database'), echo=True) Session = sessionmaker(bind=engine) session", "message.author.mention + ' Fika? 
Det är ju lunch...') # return # BEGIN Anti-spam", "client.send_message(message.channel, message.author.mention + ' Förhelvete...!') if rate_1m == 7: print('RTL-3 - GTFO') rate_limit_bail", "from sqlalchemy.orm import sessionmaker from db import Base,User,Server,Trigger def load_config(configfile): \"\"\"Return a dict", "in message.mentions and 'help' in message.content: yield from client.send_typing(message.channel) yield from asyncio.sleep(4) yield", "# .select_from(Trigger) # .filter(Trigger.user_id==user_id) # .filter(Trigger.server_id==server_id) # .filter(Trigger.time > ()time.time()-(30*60)) # 5*60 s", "message.author.mention + ' Förhelvete...!') if rate_1m == 7: print('RTL-3 - GTFO') rate_limit_bail =", "Jag kommer att pinga folk på fikalistan när någon pingar mig med \"fika\".", "if rate_g_5m >= 1: print('RTL-1 - silent') rate_limit_bail = True # RTL-2 if", "när det är fika på G.') else: print('But, your already registered in this", "re import time import yaml import os import sys from sqlalchemy import func,", "alla användare relaterade till servern. fikare_mentions = \"\" for fikare in fikare_db: #loopa", "client.user in message.mentions and 'GTFO' in message.content: print('[on_message] Removing client ID:{0} from everywhere')", "= Session() Base.metadata.create_all(engine) # Create client client = discord.Client() @client.event @asyncio.coroutine def on_ready():", "h == 12: # yield from client.send_message(message.channel, message.author.mention + ' Fika? Det är", "message.mentions and 'unregister' in message.content: print('[on_message] Removing client ID:{0} from ID:{1}'.format(user_id, server_id)) u", "not s in u.servers: u.servers.append(s) session.commit() print('Added client to server') yield from client.send_message(message.channel,", "borttagen ifrån fikalistan. `@fikabotten fika` - Trigga meddelandet till alla på fikalistan. \"\"\"", "session.query(User).filter(User.id==user_id).one_or_none() if u is not None: session.delete(u) session.commit() elif client.user in message.mentions and", "mentions till en lång sträng. yield from client.send_message(message.channel, fikare_mentions + \"FIKA!!\") #Skrik. (", "return elif h > 18: yield from client.send_message(message.channel, message.author.mention + ' Lite sent", "True # RTL-2 if rate_5m == 4: print('RTL-2 - verbose') rate_limit_bail = True", "# If hen isn't, GTFO if u is None: return # Do a", "server_id=server_id, timestamp=int(time.time()) )) # Gotta commit those changes to the DB. session.commit() if", "(client.user in message.mentions and len(re.findall('fika', message.content,flags=re.I)) > 0): print('[on_message] TIME TO GET SOME", "configfile = os.getenv('FIKABOTTEN_CONFIG') config = load_config(configfile) # Setup database engine = create_engine(config.get('database'), echo=True)", "user objects as u and s. u = session.query(User).filter(User.id==user_id).one_or_none() s = session.query(Server).filter(Server.id==server_id).one_or_none() #", "% configfile) sys.exit(1) return config configfile = 'config.yaml' if os.getenv('FIKABOTTEN_CONFIG'): configfile = os.getenv('FIKABOTTEN_CONFIG')", "bli' + ' pingad när det är fika på G.') else: print('But, your", "= True yield from client.send_message(message.channel, message.author.mention + ' Förhelvete...!') if rate_1m == 7:", "fika` - Trigga meddelandet till alla på fikalistan. 
\"\"\" ) elif client.user in", "u.servers: u.servers.append(s) session.commit() print('Added client to server') yield from client.send_message(message.channel, 'Du kommer att", "import discord import asyncio import re import time import yaml import os import", "python3 import discord import asyncio import re import time import yaml import os", "fikare_mentions = \"\" for fikare in fikare_db: #loopa över listan fikare_mentions += '<@{0.id}>", "odd hours of the day. if h < 8 or h > 23:", "client.user in message.mentions and 'unregister' in message.content: print('[on_message] Removing client ID:{0} from ID:{1}'.format(user_id,", "5*60 s .scalar() ) #rate_30m = ( # session.query(func.count('*')) # .select_from(Trigger) # .filter(Trigger.user_id==user_id)", "== 4: print('RTL-2 - verbose') rate_limit_bail = True yield from client.send_message(message.channel, message.author.mention +", "h = time.localtime().tm_hour # Get server and user objects as u and s.", "misuse the bot. # rate_1m = ( session.query(func.count('*')) .select_from(Trigger) .filter(Trigger.user_id==user_id) .filter(Trigger.server_id==server_id) .filter(Trigger.timestamp >", "= session.query(User).filter(User.id==user_id).one_or_none() s = session.query(Server).filter(Server.id==server_id).one_or_none() # If hen isn't, GTFO if u is", "'help' in message.content: yield from client.send_typing(message.channel) yield from asyncio.sleep(4) yield from client.send_message(message.channel, \"\"\"", "return # Do a check for odd hours of the day. if h", "# # Okej, ready for action. Vi har serverobjektet. if s is not", "RTL-1 if rate_g_5m >= 1: print('RTL-1 - silent') rate_limit_bail = True # RTL-2", "return # BEGIN Anti-spam section # Because people are generally assholes and will", "@client.event @asyncio.coroutine def on_ready(): print('Logged in as') print(client.user.name) print(client.user.id) print('------') @client.event @asyncio.coroutine def", "()time.time()-(30*60)) # 5*60 s # .scalar() #) rate_limit_bail = False # RTL-1 if", "# RTL-2 if rate_5m == 4: print('RTL-2 - verbose') rate_limit_bail = True yield", "is not None: s = session.query(Server).filter(Server.id==server_id).one_or_none() if s is not None: s.users.remove(u) session.commit()", "if s is not None: fikare_db = s.users # En list med alla", "( session.query(func.count('*')) .select_from(Trigger) .filter(Trigger.user_id==user_id) .filter(Trigger.server_id==server_id) .filter(Trigger.timestamp > time.time()-(5*60)) # 5*60 s .scalar() )", "yaml import os import sys from sqlalchemy import func, create_engine from sqlalchemy.orm import", "Lite sent för fika nu....') return #elif h == 10 or h ==", "s # .scalar() #) rate_limit_bail = False # RTL-1 if rate_g_5m >= 1:", "till en lång sträng. yield from client.send_message(message.channel, fikare_mentions + \"FIKA!!\") #Skrik. ( session.query(Trigger)", "<reponame>albgus/fikabotten<gh_stars>1-10 #!/usr/bin/env python3 import discord import asyncio import re import time import yaml", "( session.query(Trigger) .filter(Trigger.timestamp < int(time.time()-30*60)) .delete() ) elif len(re.findall('fika', message.content, flags=re.I)) > 0:", "client.user in message.mentions and 'register' in message.content: print('[on_message] Adding client ID:{0} on ID:{1}'.format(user_id,", "from this bot immediately. if message.author == client.user: return import json print(json.dumps(message.raw_mentions)) print('Recived", "probably attempt to misuse the bot. 
# rate_1m = ( session.query(func.count('*')) .select_from(Trigger) .filter(Trigger.user_id==user_id)", "%s' % configfile) sys.exit(1) return config configfile = 'config.yaml' if os.getenv('FIKABOTTEN_CONFIG'): configfile =", "file: %s' % configfile) sys.exit(1) return config configfile = 'config.yaml' if os.getenv('FIKABOTTEN_CONFIG'): configfile", "not parse config file: %s' % configfile) sys.exit(1) except IOError: print('Could not open", "message') user_id = message.author.id server_id = message.server.id print('UserID: {0}'.format(message.author.id)) if client.user in message.mentions", "u = User(id=user_id) session.add(u) print('Added user to database') s = session.query(Server).filter(Server.id==server_id).one_or_none() if s", "except yaml.parser.ParserError: print('Could not parse config file: %s' % configfile) sys.exit(1) except IOError:", ".filter(Trigger.user_id==user_id) .filter(Trigger.server_id==server_id) .filter(Trigger.timestamp > time.time()-(60)) # 60s .scalar() ) rate_g_5m = ( session.query(func.count('*'))", "server_id)) u = session.query(User).filter(User.id==user_id).one_or_none() if u is not None: s = session.query(Server).filter(Server.id==server_id).one_or_none() if", "except IOError: print('Could not open config file: %s' % configfile) sys.exit(1) return config", ":middle_finger:') session.add(Trigger( user_id=message.author.id, server_id=server_id, timestamp=int(time.time()) )) # Gotta commit those changes to the", "serverobjektet. if s is not None: fikare_db = s.users # En list med", "'GTFO' in message.content: print('[on_message] Removing client ID:{0} from everywhere') u = session.query(User).filter(User.id==user_id).one_or_none() if", "# .scalar() #) rate_limit_bail = False # RTL-1 if rate_g_5m >= 1: print('RTL-1", "% configfile) sys.exit(1) except IOError: print('Could not open config file: %s' % configfile)", "print('RTL-2 - verbose') rate_limit_bail = True yield from client.send_message(message.channel, message.author.mention + ' Förhelvete...!')", "@asyncio.coroutine def on_message(message): # Stop handling messages from this bot immediately. if message.author", "fikalistan. `@fikabotten fika` - Trigga meddelandet till alla på fikalistan. \"\"\" ) elif", "this bot immediately. if message.author == client.user: return import json print(json.dumps(message.raw_mentions)) print('Recived message')", "+= '<@{0.id}> '.format(fikare) # Lägg till mentions till en lång sträng. 
yield from", "session.commit() elif client.user in message.mentions and 'register' in message.content: print('[on_message] Adding client ID:{0}", "( session.query(func.count('*')) .select_from(Trigger) .filter(Trigger.server_id==server_id) .filter(Trigger.timestamp > time.time()-(5*60)) # 5*60s .scalar() ) rate_5m =", "s .scalar() ) #rate_30m = ( # session.query(func.count('*')) # .select_from(Trigger) # .filter(Trigger.user_id==user_id) #", "# Create client client = discord.Client() @client.event @asyncio.coroutine def on_ready(): print('Logged in as')", "- user dont exist') elif client.user in message.mentions and 'GTFO' in message.content: print('[on_message]", "'Du är redan tillagd ') yield from asyncio.sleep(3) yield from client.send_typing(message.channel) yield from", ".filter(Trigger.timestamp > time.time()-(5*60)) # 5*60 s .scalar() ) #rate_30m = ( # session.query(func.count('*'))", ".delete() ) elif len(re.findall('fika', message.content, flags=re.I)) > 0: print('[on_message] DEBUG: fika matched, but", "if u is not None: session.delete(u) session.commit() elif client.user in message.mentions and 'register'", "GTFO') rate_limit_bail = True yield from client.send_message(message.channel, message.author.mention + ' :middle_finger:') session.add(Trigger( user_id=message.author.id,", "hours of the day. if h < 8 or h > 23: yield", "configfile) sys.exit(1) except IOError: print('Could not open config file: %s' % configfile) sys.exit(1)", "not None: session.delete(u) session.commit() elif client.user in message.mentions and 'register' in message.content: print('[on_message]", "s is None: s = Server(id=server_id) session.add(s) print('Added server to database') if not", "Create client client = discord.Client() @client.event @asyncio.coroutine def on_ready(): print('Logged in as') print(client.user.name)", "in message.content: yield from client.send_typing(message.channel) yield from asyncio.sleep(4) yield from client.send_message(message.channel, \"\"\" Jag", "server dont exist. waaat') else: print('[on_message] unregister - user dont exist') elif client.user", "ID:{1}'.format(user_id, server_id)) u = session.query(User).filter(User.id==user_id).one_or_none() if u is None: u = User(id=user_id) session.add(u)", "client.send_message(message.channel, 'Du är redan tillagd ') yield from asyncio.sleep(3) yield from client.send_typing(message.channel) yield", "check for odd hours of the day. if h < 8 or h", "print('RTL-3 - GTFO') rate_limit_bail = True yield from client.send_message(message.channel, message.author.mention + ' :middle_finger:')", "user_id=message.author.id, server_id=server_id, timestamp=int(time.time()) )) # Gotta commit those changes to the DB. 
session.commit()", "db import Base,User,Server,Trigger def load_config(configfile): \"\"\"Return a dict with configuration from the supplied", "None: fikare_db = s.users # En list med alla användare relaterade till servern.", "discord.Client() @client.event @asyncio.coroutine def on_ready(): print('Logged in as') print(client.user.name) print(client.user.id) print('------') @client.event @asyncio.coroutine", "client.send_message(message.channel, message.author.mention + ' :middle_finger:') session.add(Trigger( user_id=message.author.id, server_id=server_id, timestamp=int(time.time()) )) # Gotta commit", "a dict with configuration from the supplied yaml file.\"\"\" try: with open(configfile, 'r')", "client ID:{0} on ID:{1}'.format(user_id, server_id)) u = session.query(User).filter(User.id==user_id).one_or_none() if u is None: u", "attempt to misuse the bot. # rate_1m = ( session.query(func.count('*')) .select_from(Trigger) .filter(Trigger.user_id==user_id) .filter(Trigger.server_id==server_id)", "pingad när det är fika på G.') else: print('But, your already registered in", "TIME TO GET SOME FIKA') h = time.localtime().tm_hour # Get server and user", "isn't, GTFO if u is None: return # Do a check for odd", "print('[on_message] Adding client ID:{0} on ID:{1}'.format(user_id, server_id)) u = session.query(User).filter(User.id==user_id).one_or_none() if u is", "None: s = Server(id=server_id) session.add(s) print('Added server to database') if not s in", "Lägg till mentions till en lång sträng. yield from client.send_message(message.channel, fikare_mentions + \"FIKA!!\")", "is not None: fikare_db = s.users # En list med alla användare relaterade", "Removing client ID:{0} from everywhere') u = session.query(User).filter(User.id==user_id).one_or_none() if u is not None:", "and s. u = session.query(User).filter(User.id==user_id).one_or_none() s = session.query(Server).filter(Server.id==server_id).one_or_none() # If hen isn't, GTFO", "%s' % configfile) sys.exit(1) except IOError: print('Could not open config file: %s' %", "if rate_limit_bail: return # END Anti-spam section # # Okej, ready for action.", "client = discord.Client() @client.event @asyncio.coroutine def on_ready(): print('Logged in as') print(client.user.name) print(client.user.id) print('------')", "END Anti-spam section # # Okej, ready for action. Vi har serverobjektet. if", "bot. # rate_1m = ( session.query(func.count('*')) .select_from(Trigger) .filter(Trigger.user_id==user_id) .filter(Trigger.server_id==server_id) .filter(Trigger.timestamp > time.time()-(60)) #", "client ID:{0} from ID:{1}'.format(user_id, server_id)) u = session.query(User).filter(User.id==user_id).one_or_none() if u is not None:", "else: print('[on_message] unregister - server dont exist. 
waaat') else: print('[on_message] unregister - user", "yield from asyncio.sleep(1) yield from client.send_message(message.channel, message.author.mention + ' n00b') elif (client.user in", "s = session.query(Server).filter(Server.id==server_id).one_or_none() if s is None: s = Server(id=server_id) session.add(s) print('Added server", "func, create_engine from sqlalchemy.orm import sessionmaker from db import Base,User,Server,Trigger def load_config(configfile): \"\"\"Return", "if os.getenv('FIKABOTTEN_CONFIG'): configfile = os.getenv('FIKABOTTEN_CONFIG') config = load_config(configfile) # Setup database engine =", "message.author.id server_id = message.server.id print('UserID: {0}'.format(message.author.id)) if client.user in message.mentions and 'help' in", "message.mentions and 'register' in message.content: print('[on_message] Adding client ID:{0} on ID:{1}'.format(user_id, server_id)) u", "'.format(fikare) # Lägg till mentions till en lång sträng. yield from client.send_message(message.channel, fikare_mentions", "#loopa över listan fikare_mentions += '<@{0.id}> '.format(fikare) # Lägg till mentions till en", "u = session.query(User).filter(User.id==user_id).one_or_none() s = session.query(Server).filter(Server.id==server_id).one_or_none() # If hen isn't, GTFO if u", "def on_ready(): print('Logged in as') print(client.user.name) print(client.user.id) print('------') @client.event @asyncio.coroutine def on_message(message): #", "from everywhere') u = session.query(User).filter(User.id==user_id).one_or_none() if u is not None: session.delete(u) session.commit() elif", "if rate_1m == 7: print('RTL-3 - GTFO') rate_limit_bail = True yield from client.send_message(message.channel,", "print('[on_message] unregister - server dont exist. waaat') else: print('[on_message] unregister - user dont", "not None: s.users.remove(u) session.commit() yield from client.send_message(message.channel, \"Du är nu borttagen\") else: print('[on_message]", "configfile = 'config.yaml' if os.getenv('FIKABOTTEN_CONFIG'): configfile = os.getenv('FIKABOTTEN_CONFIG') config = load_config(configfile) # Setup", "nu....') return #elif h == 10 or h == 11 or h ==", "fikare in fikare_db: #loopa över listan fikare_mentions += '<@{0.id}> '.format(fikare) # Lägg till", "message.author.mention + ' :middle_finger:') return elif h > 18: yield from client.send_message(message.channel, message.author.mention", "( session.query(func.count('*')) .select_from(Trigger) .filter(Trigger.user_id==user_id) .filter(Trigger.server_id==server_id) .filter(Trigger.timestamp > time.time()-(60)) # 60s .scalar() ) rate_g_5m", "u = session.query(User).filter(User.id==user_id).one_or_none() if u is not None: session.delete(u) session.commit() elif client.user in", "till mentions till en lång sträng. yield from client.send_message(message.channel, fikare_mentions + \"FIKA!!\") #Skrik.", "people are generally assholes and will probably attempt to misuse the bot. 
#!/usr/bin/env python3
import discord
import asyncio
import re
import time
import yaml
import os
import sys

from sqlalchemy import func, create_engine
from sqlalchemy.orm import sessionmaker

from db import Base, User, Server, Trigger


def load_config(configfile):
    """Return a dict with configuration from the supplied yaml file."""
    try:
        with open(configfile, 'r') as ymlfile:
            try:
                config = yaml.safe_load(ymlfile)
            except yaml.parser.ParserError:
                print('Could not parse config file: %s' % configfile)
                sys.exit(1)
    except IOError:
        print('Could not open config file: %s' % configfile)
        sys.exit(1)
    return config


configfile = 'config.yaml'
if os.getenv('FIKABOTTEN_CONFIG'):
    configfile = os.getenv('FIKABOTTEN_CONFIG')
config = load_config(configfile)

# Setup database
engine = create_engine(config.get('database'), echo=True)
Session = sessionmaker(bind=engine)
session = Session()
Base.metadata.create_all(engine)

# Create client
client = discord.Client()


@client.event
@asyncio.coroutine
def on_ready():
    print('Logged in as')
    print(client.user.name)
    print(client.user.id)
    print('------')


@client.event
@asyncio.coroutine
def on_message(message):
    # Stop handling messages from this bot immediately.
    if message.author == client.user:
        return

    import json
    print(json.dumps(message.raw_mentions))
    print('Received message')
    user_id = message.author.id
    server_id = message.server.id
    print('UserID: {0}'.format(message.author.id))

    if client.user in message.mentions and 'help' in message.content:
        yield from client.send_typing(message.channel)
        yield from asyncio.sleep(4)
        yield from client.send_message(message.channel, """
Jag kommer att pinga folk på fikalistan när någon pingar mig med "fika".
`@fikabotten register` - registrera dig på fikalistan.
`@fikabotten unregister` - bli borttagen ifrån fikalistan.
`@fikabotten fika` - Trigga meddelandet till alla på fikalistan.
""")

    elif client.user in message.mentions and 'unregister' in message.content:
        print('[on_message] Removing client ID:{0} from ID:{1}'.format(user_id, server_id))
        u = session.query(User).filter(User.id == user_id).one_or_none()
        if u is not None:
            s = session.query(Server).filter(Server.id == server_id).one_or_none()
            if s is not None:
                s.users.remove(u)
                session.commit()
                yield from client.send_message(message.channel, "Du är nu borttagen")
            else:
                print('[on_message] unregister - server dont exist. waaat')
        else:
            print('[on_message] unregister - user dont exist')

    elif client.user in message.mentions and 'GTFO' in message.content:
        print('[on_message] Removing client ID:{0} from everywhere'.format(user_id))
        u = session.query(User).filter(User.id == user_id).one_or_none()
        if u is not None:
            session.delete(u)
            session.commit()

    elif client.user in message.mentions and 'register' in message.content:
        print('[on_message] Adding client ID:{0} on ID:{1}'.format(user_id, server_id))
        u = session.query(User).filter(User.id == user_id).one_or_none()
        if u is None:
            u = User(id=user_id)
            session.add(u)
            print('Added user to database')
        s = session.query(Server).filter(Server.id == server_id).one_or_none()
        if s is None:
            s = Server(id=server_id)
            session.add(s)
            print('Added server to database')
        if s not in u.servers:
            u.servers.append(s)
            session.commit()
            print('Added client to server')
            yield from client.send_message(message.channel,
                                           'Du kommer att bli pingad när det är fika på G.')
        else:
            print("But, you're already registered in this server :o")
            yield from client.send_message(message.channel, 'Du är redan tillagd ')
            yield from asyncio.sleep(3)
            yield from client.send_typing(message.channel)
            yield from asyncio.sleep(1)
            yield from client.send_message(message.channel, message.author.mention + ' n00b')

    elif (client.user in message.mentions
          and len(re.findall('fika', message.content, flags=re.I)) > 0):
        print('[on_message] TIME TO GET SOME FIKA')
        h = time.localtime().tm_hour

        # Get server and user objects as u and s.
        u = session.query(User).filter(User.id == user_id).one_or_none()
        s = session.query(Server).filter(Server.id == server_id).one_or_none()
        # If hen isn't registered, GTFO.
        if u is None:
            return

        # Do a check for odd hours of the day.
        if h < 8 or h > 23:
            yield from client.send_message(message.channel, message.author.mention + ' :middle_finger:')
            return
        elif h > 18:
            yield from client.send_message(message.channel, message.author.mention + ' Lite sent för fika nu....')
            return
        #elif h == 10 or h == 11 or h == 12:
        #    yield from client.send_message(message.channel, message.author.mention + ' Fika? Det är ju lunch...')
        #    return

        # BEGIN Anti-spam section
        # Because people are generally assholes and will probably attempt to misuse the bot.
        #
        # Triggers by this user on this server during the last 60 s.
        rate_1m = (
            session.query(func.count('*'))
            .select_from(Trigger)
            .filter(Trigger.user_id == user_id)
            .filter(Trigger.server_id == server_id)
            .filter(Trigger.timestamp > time.time() - (60))  # 60 s
            .scalar()
        )
        # Triggers by anyone on this server during the last five minutes.
        rate_g_5m = (
            session.query(func.count('*'))
            .select_from(Trigger)
            .filter(Trigger.server_id == server_id)
            .filter(Trigger.timestamp > time.time() - (5 * 60))  # 5*60 s
            .scalar()
        )
        # Triggers by this user on this server during the last five minutes.
        rate_5m = (
            session.query(func.count('*'))
            .select_from(Trigger)
            .filter(Trigger.user_id == user_id)
            .filter(Trigger.server_id == server_id)
            .filter(Trigger.timestamp > time.time() - (5 * 60))  # 5*60 s
            .scalar()
        )
        #rate_30m = (
        #    session.query(func.count('*'))
        #    .select_from(Trigger)
        #    .filter(Trigger.user_id == user_id)
        #    .filter(Trigger.server_id == server_id)
        #    .filter(Trigger.timestamp > time.time() - (30 * 60))  # 30*60 s
        #    .scalar()
        #)

        rate_limit_bail = False
        # RTL-1
        if rate_g_5m >= 1:
            print('RTL-1 - silent')
            rate_limit_bail = True
        # RTL-2
        if rate_5m == 4:
            print('RTL-2 - verbose')
            rate_limit_bail = True
            yield from client.send_message(message.channel, message.author.mention + ' Förhelvete...!')
        if rate_1m == 7:
            print('RTL-3 - GTFO')
            rate_limit_bail = True
            yield from client.send_message(message.channel, message.author.mention + ' :middle_finger:')

        session.add(Trigger(
            user_id=message.author.id,
            server_id=server_id,
            timestamp=int(time.time())
        ))
        # Gotta commit those changes to the DB.
        session.commit()

        if rate_limit_bail:
            return
        # END Anti-spam section
        #
        # Okay, ready for action. We have the server object.
        if s is not None:
            fikare_db = s.users  # A list of all users related to this server.
            fikare_mentions = ""
            for fikare in fikare_db:  # loop over the list
                fikare_mentions += '<@{0.id}> '.format(fikare)  # append each mention to one long string
            yield from client.send_message(message.channel, fikare_mentions + "FIKA!!")  # Shout.
            (
                session.query(Trigger)
                .filter(Trigger.timestamp < int(time.time() - 30 * 60))
                .delete()
            )

    elif len(re.findall('fika', message.content, flags=re.I)) > 0:
        print('[on_message] DEBUG: fika matched, but no trigger.')
        print('------')


client.run(config.get('token'))
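# ---------------------------------------------------------------------------
# Editorial note (sketch): the bot above imports Base, User, Server and Trigger
# from a local `db` module that is not included in this document. The models
# below are only an illustration of what that module might contain, inferred
# from how the objects are used above (User(id=...), u.servers, s.users,
# Trigger(user_id=..., server_id=..., timestamp=...)). The table names, column
# types and the association table are assumptions, not the project's actual
# schema.

from sqlalchemy import Column, String, Integer, Table, ForeignKey
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship

Base = declarative_base()

# Assumed many-to-many link between users and the servers they registered on.
user_server = Table(
    'user_server', Base.metadata,
    Column('user_id', String, ForeignKey('users.id')),
    Column('server_id', String, ForeignKey('servers.id')),
)


class User(Base):
    __tablename__ = 'users'
    id = Column(String, primary_key=True)  # Discord user id
    servers = relationship('Server', secondary=user_server, backref='users')


class Server(Base):
    __tablename__ = 'servers'
    id = Column(String, primary_key=True)  # Discord server id


class Trigger(Base):
    __tablename__ = 'triggers'
    id = Column(Integer, primary_key=True)
    user_id = Column(String)
    server_id = Column(String)
    timestamp = Column(Integer)  # unix time of the "fika" trigger
# ---------------------------------------------------------------------------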
[ "] \"\"\" OK = \"\\[ OK \\]\" RUN = \"\\[ RUN \\]\" FAILED", "content_value.find('\\n') if end_name_pos != -1: self.name_test = content_value[:end_name_pos] # Почему-то если в строке", "in list(re.finditer(OK, out))]) template.extend([Ptr(a.start(), 'fail') for a in list(re.finditer(FAILED, out))]) template.extend([Ptr(a.end(), 'run') for", "def split_test_response(self, content_value): \"\"\" Returns: (name, out) \"\"\" end_name_pos = content_value.find('\\n') if end_name_pos", "record: record.position), how_split) result = [] for pair in response_pairs: head = pair[0]", "= [] for pair in response_pairs: head = pair[0] # с ключем run", "# с ключем run идет вервой if head.type_response == 'run': if len(pair) ==", "coding: utf-8 import re from inner_reuse import chunks class Ptr(object): \"\"\" Contain operation", "строке выше, то влияет на все строку, а не только на срезанную self.name_test", "срезанную self.name_test = self.name_test.replace('\\r', '') self.out = content_value[end_name_pos:].strip() else: self.name_test = content_value def", "list(re.finditer(RUN, out))]) how_split = 2 # По парам response_pairs = chunks(sorted(template, key=lambda record:", "a in list(re.finditer(OK, out))]) template.extend([Ptr(a.start(), 'fail') for a in list(re.finditer(FAILED, out))]) template.extend([Ptr(a.end(), 'run')", "\\]\" RUN = \"\\[ RUN \\]\" FAILED = \"\\[ FAILED \\]\" template =", "= \"\\[ RUN \\]\" FAILED = \"\\[ FAILED \\]\" template = [] template.extend([Ptr(a.start(),", "= content_value[end_name_pos:].strip() else: self.name_test = content_value def parser_out(out): \"\"\" Returns: [(name, ok/fail/deadlock, out_one),", "OK = \"\\[ OK \\]\" RUN = \"\\[ RUN \\]\" FAILED = \"\\[", "self.type_response = type_key self.name_test = None self.out = \"\" def __repr__(self): return str(dict((k,", "operation data \"\"\" def __init__(self, pos, type_key): self.position = pos self.type_response = type_key", "v in self.__dict__.items() if k != 'position')) def get_data(self): \"\"\" Returns: key, value", "\"\"\" Returns: key, value \"\"\" return self.type_response, (self.name_test, self.out) def split_test_response(self, content_value): \"\"\"", "!= -1: self.name_test = content_value[:end_name_pos] # Почему-то если в строке выше, то влияет", "yield bottom.get_data() def parse_out_not_gen(out): gen = parser_out(out) report = [] for i in", "в строке выше, то влияет на все строку, а не только на срезанную", "data \"\"\" def __init__(self, pos, type_key): self.position = pos self.type_response = type_key self.name_test", "= content_value def parser_out(out): \"\"\" Returns: [(name, ok/fail/deadlock, out_one), ..., ] \"\"\" OK", "парам response_pairs = chunks(sorted(template, key=lambda record: record.position), how_split) result = [] for pair", "[] for pair in response_pairs: head = pair[0] # с ключем run идет", "response_pairs: head = pair[0] # с ключем run идет вервой if head.type_response ==", "template.extend([Ptr(a.start(), 'ok') for a in list(re.finditer(OK, out))]) template.extend([Ptr(a.start(), 'fail') for a in list(re.finditer(FAILED,", "\\]\" template = [] template.extend([Ptr(a.start(), 'ok') for a in list(re.finditer(OK, out))]) template.extend([Ptr(a.start(), 'fail')", "-1: self.name_test = content_value[:end_name_pos] # Почему-то если в строке выше, то влияет на", "def __init__(self, pos, type_key): self.position = pos self.type_response = type_key self.name_test = None", "\"\"\" end_name_pos = content_value.find('\\n') if end_name_pos != -1: self.name_test = content_value[:end_name_pos] 
            # For some reason doing this on the line above affects the whole
            # string, not just the slice, so strip the '\r' here instead.
            self.name_test = self.name_test.replace('\r', '')
            self.out = content_value[end_name_pos:].strip()
        else:
            self.name_test = content_value


def parser_out(out):
    """ Returns: [(name, ok/fail/deadlock, out_one), ..., ] """
    OK = "\[ OK \]"
    RUN = "\[ RUN \]"
    FAILED = "\[ FAILED \]"
    template = []
    template.extend([Ptr(a.start(), 'ok') for a in list(re.finditer(OK, out))])
    template.extend([Ptr(a.start(), 'fail') for a in list(re.finditer(FAILED, out))])
    template.extend([Ptr(a.end(), 'run') for a in list(re.finditer(RUN, out))])
    how_split = 2  # in pairs
    response_pairs = chunks(sorted(template, key=lambda record: record.position), how_split)
    result = []
    for pair in response_pairs:
        head = pair[0]
        # The entry with the 'run' key comes first.
        if head.type_response == 'run':
            if len(pair) == 1:
                pair.append(Ptr(len(out), 'deadlock'))
            bottom = pair[1]
            content = out[head.position:bottom.position].strip()
            bottom.split_test_response(content)
            yield bottom.get_data()


def parse_out_not_gen(out):
    gen = parser_out(out)
    report = []
    for i in gen:
        report.append(i)
    return report
если в строке выше, то влияет на все", "'run') for a in list(re.finditer(RUN, out))]) how_split = 2 # По парам response_pairs", "run идет вервой if head.type_response == 'run': if len(pair) == 1: pair.append(Ptr(out.__len__(), 'deadlock'))", "self.name_test = None self.out = \"\" def __repr__(self): return str(dict((k, v) for k,", "\"\\[ OK \\]\" RUN = \"\\[ RUN \\]\" FAILED = \"\\[ FAILED \\]\"", "bottom.split_test_response(content) yield bottom.get_data() def parse_out_not_gen(out): gen = parser_out(out) report = [] for i", "= content_value.find('\\n') if end_name_pos != -1: self.name_test = content_value[:end_name_pos] # Почему-то если в", "\"\"\" def __init__(self, pos, type_key): self.position = pos self.type_response = type_key self.name_test =", "влияет на все строку, а не только на срезанную self.name_test = self.name_test.replace('\\r', '')", "не только на срезанную self.name_test = self.name_test.replace('\\r', '') self.out = content_value[end_name_pos:].strip() else: self.name_test", "на все строку, а не только на срезанную self.name_test = self.name_test.replace('\\r', '') self.out", "\\]\" FAILED = \"\\[ FAILED \\]\" template = [] template.extend([Ptr(a.start(), 'ok') for a", "bottom = pair[1] content = out[head.position:bottom.position].strip() bottom.split_test_response(content) yield bottom.get_data() def parse_out_not_gen(out): gen =", "'run': if len(pair) == 1: pair.append(Ptr(out.__len__(), 'deadlock')) bottom = pair[1] content = out[head.position:bottom.position].strip()", "= \"\" def __repr__(self): return str(dict((k, v) for k, v in self.__dict__.items() if", "[(name, ok/fail/deadlock, out_one), ..., ] \"\"\" OK = \"\\[ OK \\]\" RUN =", "только на срезанную self.name_test = self.name_test.replace('\\r', '') self.out = content_value[end_name_pos:].strip() else: self.name_test =", "list(re.finditer(FAILED, out))]) template.extend([Ptr(a.end(), 'run') for a in list(re.finditer(RUN, out))]) how_split = 2 #", "Почему-то если в строке выше, то влияет на все строку, а не только", "self.out = content_value[end_name_pos:].strip() else: self.name_test = content_value def parser_out(out): \"\"\" Returns: [(name, ok/fail/deadlock,", "\"\"\" Contain operation data \"\"\" def __init__(self, pos, type_key): self.position = pos self.type_response", "content_value[:end_name_pos] # Почему-то если в строке выше, то влияет на все строку, а", "for a in list(re.finditer(FAILED, out))]) template.extend([Ptr(a.end(), 'run') for a in list(re.finditer(RUN, out))]) how_split", "# coding: utf-8 import re from inner_reuse import chunks class Ptr(object): \"\"\" Contain", "(self.name_test, self.out) def split_test_response(self, content_value): \"\"\" Returns: (name, out) \"\"\" end_name_pos = content_value.find('\\n')", "1: pair.append(Ptr(out.__len__(), 'deadlock')) bottom = pair[1] content = out[head.position:bottom.position].strip() bottom.split_test_response(content) yield bottom.get_data() def", "\"\"\" Returns: (name, out) \"\"\" end_name_pos = content_value.find('\\n') if end_name_pos != -1: self.name_test", "\"\"\" return self.type_response, (self.name_test, self.out) def split_test_response(self, content_value): \"\"\" Returns: (name, out) \"\"\"", "for k, v in self.__dict__.items() if k != 'position')) def get_data(self): \"\"\" Returns:", "template.extend([Ptr(a.end(), 'run') for a in list(re.finditer(RUN, out))]) how_split = 2 # По парам", "Returns: (name, out) \"\"\" end_name_pos = content_value.find('\\n') if end_name_pos != -1: self.name_test =", "parser_out(out): \"\"\" Returns: 
[(name, ok/fail/deadlock, out_one), ..., ] \"\"\" OK = \"\\[ OK", "if k != 'position')) def get_data(self): \"\"\" Returns: key, value \"\"\" return self.type_response,", "content_value): \"\"\" Returns: (name, out) \"\"\" end_name_pos = content_value.find('\\n') if end_name_pos != -1:", "pos, type_key): self.position = pos self.type_response = type_key self.name_test = None self.out =", "По парам response_pairs = chunks(sorted(template, key=lambda record: record.position), how_split) result = [] for", "= None self.out = \"\" def __repr__(self): return str(dict((k, v) for k, v", "out) \"\"\" end_name_pos = content_value.find('\\n') if end_name_pos != -1: self.name_test = content_value[:end_name_pos] #", "content = out[head.position:bottom.position].strip() bottom.split_test_response(content) yield bottom.get_data() def parse_out_not_gen(out): gen = parser_out(out) report =", "return str(dict((k, v) for k, v in self.__dict__.items() if k != 'position')) def", "'deadlock')) bottom = pair[1] content = out[head.position:bottom.position].strip() bottom.split_test_response(content) yield bottom.get_data() def parse_out_not_gen(out): gen", "self.name_test.replace('\\r', '') self.out = content_value[end_name_pos:].strip() else: self.name_test = content_value def parser_out(out): \"\"\" Returns:", "pair in response_pairs: head = pair[0] # с ключем run идет вервой if", "class Ptr(object): \"\"\" Contain operation data \"\"\" def __init__(self, pos, type_key): self.position =", "def get_data(self): \"\"\" Returns: key, value \"\"\" return self.type_response, (self.name_test, self.out) def split_test_response(self,", "self.name_test = content_value def parser_out(out): \"\"\" Returns: [(name, ok/fail/deadlock, out_one), ..., ] \"\"\"", "[] template.extend([Ptr(a.start(), 'ok') for a in list(re.finditer(OK, out))]) template.extend([Ptr(a.start(), 'fail') for a in", "RUN \\]\" FAILED = \"\\[ FAILED \\]\" template = [] template.extend([Ptr(a.start(), 'ok') for", "ключем run идет вервой if head.type_response == 'run': if len(pair) == 1: pair.append(Ptr(out.__len__(),", "re from inner_reuse import chunks class Ptr(object): \"\"\" Contain operation data \"\"\" def", "head.type_response == 'run': if len(pair) == 1: pair.append(Ptr(out.__len__(), 'deadlock')) bottom = pair[1] content", "то влияет на все строку, а не только на срезанную self.name_test = self.name_test.replace('\\r',", "for a in list(re.finditer(OK, out))]) template.extend([Ptr(a.start(), 'fail') for a in list(re.finditer(FAILED, out))]) template.extend([Ptr(a.end(),", "import re from inner_reuse import chunks class Ptr(object): \"\"\" Contain operation data \"\"\"", "import chunks class Ptr(object): \"\"\" Contain operation data \"\"\" def __init__(self, pos, type_key):", "head = pair[0] # с ключем run идет вервой if head.type_response == 'run':", "key, value \"\"\" return self.type_response, (self.name_test, self.out) def split_test_response(self, content_value): \"\"\" Returns: (name,", "else: self.name_test = content_value def parser_out(out): \"\"\" Returns: [(name, ok/fail/deadlock, out_one), ..., ]", "parse_out_not_gen(out): gen = parser_out(out) report = [] for i in gen: report.append(i) return", "out))]) how_split = 2 # По парам response_pairs = chunks(sorted(template, key=lambda record: record.position),", "FAILED \\]\" template = [] template.extend([Ptr(a.start(), 'ok') for a in list(re.finditer(OK, out))]) template.extend([Ptr(a.start(),", "если в строке выше, то влияет на все строку, а не только на", "out))]) 
template.extend([Ptr(a.end(), 'run') for a in list(re.finditer(RUN, out))]) how_split = 2 # По", "in response_pairs: head = pair[0] # с ключем run идет вервой if head.type_response", "split_test_response(self, content_value): \"\"\" Returns: (name, out) \"\"\" end_name_pos = content_value.find('\\n') if end_name_pos !=", "end_name_pos != -1: self.name_test = content_value[:end_name_pos] # Почему-то если в строке выше, то", "out[head.position:bottom.position].strip() bottom.split_test_response(content) yield bottom.get_data() def parse_out_not_gen(out): gen = parser_out(out) report = [] for", "chunks(sorted(template, key=lambda record: record.position), how_split) result = [] for pair in response_pairs: head", "list(re.finditer(OK, out))]) template.extend([Ptr(a.start(), 'fail') for a in list(re.finditer(FAILED, out))]) template.extend([Ptr(a.end(), 'run') for a", "а не только на срезанную self.name_test = self.name_test.replace('\\r', '') self.out = content_value[end_name_pos:].strip() else:", "все строку, а не только на срезанную self.name_test = self.name_test.replace('\\r', '') self.out =", "'') self.out = content_value[end_name_pos:].strip() else: self.name_test = content_value def parser_out(out): \"\"\" Returns: [(name,", "if len(pair) == 1: pair.append(Ptr(out.__len__(), 'deadlock')) bottom = pair[1] content = out[head.position:bottom.position].strip() bottom.split_test_response(content)", "FAILED = \"\\[ FAILED \\]\" template = [] template.extend([Ptr(a.start(), 'ok') for a in" ]
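The array above is the shingle list for a small googletest console-log parser (Cyrillic comments, a project-internal `inner_reuse.chunks` dependency). Reassembled and lightly cleaned it looks roughly like the sketch below; the body of `chunks` is an assumption (the original only imports it), the regexes are written as raw strings, the unused `result` list is dropped, and the Russian comments are translated in place.

# coding: utf-8
# Sketch reassembled from the shingles above; `chunks` is an assumed stand-in
# for the original `from inner_reuse import chunks` helper.
import re


def chunks(seq, size):
    # Assumed helper: split a sequence into consecutive groups of `size` items.
    return [seq[i:i + size] for i in range(0, len(seq), size)]


class Ptr(object):
    """Contain operation data."""

    def __init__(self, pos, type_key):
        self.position = pos
        self.type_response = type_key
        self.name_test = None
        self.out = ""

    def __repr__(self):
        return str(dict((k, v) for k, v in self.__dict__.items() if k != 'position'))

    def get_data(self):
        """Returns: key, value"""
        return self.type_response, (self.name_test, self.out)

    def split_test_response(self, content_value):
        """Returns: (name, out)"""
        end_name_pos = content_value.find('\n')
        if end_name_pos != -1:
            self.name_test = content_value[:end_name_pos]
            # Translated from the original Russian comment: doing the replace on the
            # line above would affect the whole string, not just the sliced part.
            self.name_test = self.name_test.replace('\r', '')
            self.out = content_value[end_name_pos:].strip()
        else:
            self.name_test = content_value


def parser_out(out):
    """Returns: [(name, ok/fail/deadlock, out_one), ..., ]"""
    OK = r"\[ OK \]"
    RUN = r"\[ RUN \]"
    FAILED = r"\[ FAILED \]"
    template = []
    template.extend([Ptr(a.start(), 'ok') for a in re.finditer(OK, out)])
    template.extend([Ptr(a.start(), 'fail') for a in re.finditer(FAILED, out)])
    template.extend([Ptr(a.end(), 'run') for a in re.finditer(RUN, out)])
    how_split = 2  # in pairs (translated comment)
    response_pairs = chunks(sorted(template, key=lambda record: record.position), how_split)
    for pair in response_pairs:
        head = pair[0]
        # the entry with the 'run' key comes first (translated comment)
        if head.type_response == 'run':
            if len(pair) == 1:
                # An unmatched RUN marker means the test never finished.
                pair.append(Ptr(len(out), 'deadlock'))
            bottom = pair[1]
            content = out[head.position:bottom.position].strip()
            bottom.split_test_response(content)
            yield bottom.get_data()


def parse_out_not_gen(out):
    # Non-generator convenience wrapper around parser_out().
    return list(parser_out(out))

Fed a captured googletest log, parse_out_not_gen yields (status, (test_name, output)) tuples, with status one of 'ok', 'fail' or 'deadlock'.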
[ "import numpy import pandas as pd import matplotlib.pyplot as plt from sklearn.metrics import", "x=x.reshape(-1) print(x.shape) print(x) y = [2,4,6,8] #x*2=[2,4,6,8] #x*x=[1,4,9,16] #sum(x) = 10 pf=numpy.polyfit(x,y,3) print(pf)", "import matplotlib.pyplot as plt from sklearn.metrics import r2_score x = [1,2,3,4] x=numpy.array(x) print(x.shape)", "x=x.reshape(2,-1) print(x.shape) print(x) x=x.reshape(-1) print(x.shape) print(x) y = [2,4,6,8] #x*2=[2,4,6,8] #x*x=[1,4,9,16] #sum(x) =", "#x*2=[2,4,6,8] #x*x=[1,4,9,16] #sum(x) = 10 pf=numpy.polyfit(x,y,3) print(pf) print(type(pf)) model = numpy.poly1d(pf) drv=model.deriv() print(model([1,2,3,4]))", "print(x.shape) x=x.reshape(2,-1) print(x.shape) print(x) x=x.reshape(-1) print(x.shape) print(x) y = [2,4,6,8] #x*2=[2,4,6,8] #x*x=[1,4,9,16] #sum(x)", "pf=numpy.polyfit(x,y,3) print(pf) print(type(pf)) model = numpy.poly1d(pf) drv=model.deriv() print(model([1,2,3,4])) print(type(drv)) print(model) print(drv) coeff=r2_score(y, model(x))", "as plt from sklearn.metrics import r2_score x = [1,2,3,4] x=numpy.array(x) print(x.shape) x=x.reshape(2,-1) print(x.shape)", "pandas as pd import matplotlib.pyplot as plt from sklearn.metrics import r2_score x =", "[1,2,3,4] x=numpy.array(x) print(x.shape) x=x.reshape(2,-1) print(x.shape) print(x) x=x.reshape(-1) print(x.shape) print(x) y = [2,4,6,8] #x*2=[2,4,6,8]", "print(pf) print(type(pf)) model = numpy.poly1d(pf) drv=model.deriv() print(model([1,2,3,4])) print(type(drv)) print(model) print(drv) coeff=r2_score(y, model(x)) print(coeff)", "x=numpy.array(x) print(x.shape) x=x.reshape(2,-1) print(x.shape) print(x) x=x.reshape(-1) print(x.shape) print(x) y = [2,4,6,8] #x*2=[2,4,6,8] #x*x=[1,4,9,16]", "r2_score x = [1,2,3,4] x=numpy.array(x) print(x.shape) x=x.reshape(2,-1) print(x.shape) print(x) x=x.reshape(-1) print(x.shape) print(x) y", "as pd import matplotlib.pyplot as plt from sklearn.metrics import r2_score x = [1,2,3,4]", "numpy import pandas as pd import matplotlib.pyplot as plt from sklearn.metrics import r2_score", "= 10 pf=numpy.polyfit(x,y,3) print(pf) print(type(pf)) model = numpy.poly1d(pf) drv=model.deriv() print(model([1,2,3,4])) print(type(drv)) print(model) print(drv)", "#x*x=[1,4,9,16] #sum(x) = 10 pf=numpy.polyfit(x,y,3) print(pf) print(type(pf)) model = numpy.poly1d(pf) drv=model.deriv() print(model([1,2,3,4])) print(type(drv))", "pd import matplotlib.pyplot as plt from sklearn.metrics import r2_score x = [1,2,3,4] x=numpy.array(x)", "plt from sklearn.metrics import r2_score x = [1,2,3,4] x=numpy.array(x) print(x.shape) x=x.reshape(2,-1) print(x.shape) print(x)", "sklearn.metrics import r2_score x = [1,2,3,4] x=numpy.array(x) print(x.shape) x=x.reshape(2,-1) print(x.shape) print(x) x=x.reshape(-1) print(x.shape)", "= [1,2,3,4] x=numpy.array(x) print(x.shape) x=x.reshape(2,-1) print(x.shape) print(x) x=x.reshape(-1) print(x.shape) print(x) y = [2,4,6,8]", "print(x.shape) print(x) y = [2,4,6,8] #x*2=[2,4,6,8] #x*x=[1,4,9,16] #sum(x) = 10 pf=numpy.polyfit(x,y,3) print(pf) print(type(pf))", "matplotlib.pyplot as plt from sklearn.metrics import r2_score x = [1,2,3,4] x=numpy.array(x) print(x.shape) x=x.reshape(2,-1)", "= [2,4,6,8] #x*2=[2,4,6,8] #x*x=[1,4,9,16] #sum(x) = 10 pf=numpy.polyfit(x,y,3) print(pf) print(type(pf)) model = numpy.poly1d(pf)", "print(x) y = [2,4,6,8] #x*2=[2,4,6,8] #x*x=[1,4,9,16] #sum(x) = 10 pf=numpy.polyfit(x,y,3) print(pf) print(type(pf)) model", "print(x.shape) print(x) x=x.reshape(-1) print(x.shape) print(x) y = [2,4,6,8] #x*2=[2,4,6,8] #x*x=[1,4,9,16] #sum(x) = 
10", "from sklearn.metrics import r2_score x = [1,2,3,4] x=numpy.array(x) print(x.shape) x=x.reshape(2,-1) print(x.shape) print(x) x=x.reshape(-1)", "import r2_score x = [1,2,3,4] x=numpy.array(x) print(x.shape) x=x.reshape(2,-1) print(x.shape) print(x) x=x.reshape(-1) print(x.shape) print(x)", "import pandas as pd import matplotlib.pyplot as plt from sklearn.metrics import r2_score x", "print(x) x=x.reshape(-1) print(x.shape) print(x) y = [2,4,6,8] #x*2=[2,4,6,8] #x*x=[1,4,9,16] #sum(x) = 10 pf=numpy.polyfit(x,y,3)", "y = [2,4,6,8] #x*2=[2,4,6,8] #x*x=[1,4,9,16] #sum(x) = 10 pf=numpy.polyfit(x,y,3) print(pf) print(type(pf)) model =", "10 pf=numpy.polyfit(x,y,3) print(pf) print(type(pf)) model = numpy.poly1d(pf) drv=model.deriv() print(model([1,2,3,4])) print(type(drv)) print(model) print(drv) coeff=r2_score(y,", "x = [1,2,3,4] x=numpy.array(x) print(x.shape) x=x.reshape(2,-1) print(x.shape) print(x) x=x.reshape(-1) print(x.shape) print(x) y =", "[2,4,6,8] #x*2=[2,4,6,8] #x*x=[1,4,9,16] #sum(x) = 10 pf=numpy.polyfit(x,y,3) print(pf) print(type(pf)) model = numpy.poly1d(pf) drv=model.deriv()", "#sum(x) = 10 pf=numpy.polyfit(x,y,3) print(pf) print(type(pf)) model = numpy.poly1d(pf) drv=model.deriv() print(model([1,2,3,4])) print(type(drv)) print(model)" ]
[ "n sides, you need to answer whether it is possible to make n", "First line contains ,no .of testcases. For each test case , first line", "arr=list(map(int,input().split())) total=sum(arr) flag=1 for i in arr: if total-i <= i: flag=0 break", "is possible to make n sided convex polygon with it. Input : -", "Input : - First line contains ,no .of testcases. For each test case", "contains ,no .of testcases. For each test case , first line consist of", ",no .of testcases. For each test case , first line consist of single", "need to answer whether it is possible to make n sided convex polygon", "For each test case , first line consist of single integer ,second line", "length of n sides, you need to answer whether it is possible to", "1 2 1 4 SAMPLE OUTPUT Yes No ''' for _ in range(int(input())):", "is not possible. SAMPLE INPUT 2 3 4 3 2 4 1 2", "print \"Yes\" if it is possible to make polygon or else \"No\" if", "n sided convex polygon with it. Input : - First line contains ,no", ".of testcases. For each test case , first line consist of single integer", "total=sum(arr) flag=1 for i in arr: if total-i <= i: flag=0 break print", "whether it is possible to make n sided convex polygon with it. Input", "to answer whether it is possible to make n sided convex polygon with", "test case print \"Yes\" if it is possible to make polygon or else", "polygon or else \"No\" if it is not possible. SAMPLE INPUT 2 3", "- For each test case print \"Yes\" if it is possible to make", "of each side. Output : - For each test case print \"Yes\" if", "INPUT 2 3 4 3 2 4 1 2 1 4 SAMPLE OUTPUT", "or else \"No\" if it is not possible. SAMPLE INPUT 2 3 4", "- First line contains ,no .of testcases. For each test case , first", ": - For each test case print \"Yes\" if it is possible to", "test case , first line consist of single integer ,second line consist of", "if it is possible to make polygon or else \"No\" if it is", "2 4 1 2 1 4 SAMPLE OUTPUT Yes No ''' for _", "each test case print \"Yes\" if it is possible to make polygon or", "answer whether it is possible to make n sided convex polygon with it.", "each test case , first line consist of single integer ,second line consist", "it is possible to make polygon or else \"No\" if it is not", "of single integer ,second line consist of spaced integers, size of each side.", "3 4 3 2 4 1 2 1 4 SAMPLE OUTPUT Yes No", "to make polygon or else \"No\" if it is not possible. SAMPLE INPUT", "3 2 4 1 2 1 4 SAMPLE OUTPUT Yes No ''' for", "in range(int(input())): n=int(input()) arr=list(map(int,input().split())) total=sum(arr) flag=1 for i in arr: if total-i <=", "polygon with it. Input : - First line contains ,no .of testcases. For", "''' You are given length of n sides, you need to answer whether", "given length of n sides, you need to answer whether it is possible", "are given length of n sides, you need to answer whether it is", "line contains ,no .of testcases. For each test case , first line consist", "1 4 SAMPLE OUTPUT Yes No ''' for _ in range(int(input())): n=int(input()) arr=list(map(int,input().split()))", "i in arr: if total-i <= i: flag=0 break print (\"Yes\" if flag==1", "each side. Output : - For each test case print \"Yes\" if it", "_ in range(int(input())): n=int(input()) arr=list(map(int,input().split())) total=sum(arr) flag=1 for i in arr: if total-i", "4 3 2 4 1 2 1 4 SAMPLE OUTPUT Yes No '''", "for i in arr: if total-i <= i: flag=0 break print (\"Yes\" if", "line consist of spaced integers, size of each side. 
Output : - For", "of n sides, you need to answer whether it is possible to make", "No ''' for _ in range(int(input())): n=int(input()) arr=list(map(int,input().split())) total=sum(arr) flag=1 for i in", "integers, size of each side. Output : - For each test case print", "SAMPLE INPUT 2 3 4 3 2 4 1 2 1 4 SAMPLE", "4 1 2 1 4 SAMPLE OUTPUT Yes No ''' for _ in", "single integer ,second line consist of spaced integers, size of each side. Output", "4 SAMPLE OUTPUT Yes No ''' for _ in range(int(input())): n=int(input()) arr=list(map(int,input().split())) total=sum(arr)", ", first line consist of single integer ,second line consist of spaced integers,", "for _ in range(int(input())): n=int(input()) arr=list(map(int,input().split())) total=sum(arr) flag=1 for i in arr: if", ",second line consist of spaced integers, size of each side. Output : -", "Yes No ''' for _ in range(int(input())): n=int(input()) arr=list(map(int,input().split())) total=sum(arr) flag=1 for i", "spaced integers, size of each side. Output : - For each test case", "2 3 4 3 2 4 1 2 1 4 SAMPLE OUTPUT Yes", "Output : - For each test case print \"Yes\" if it is possible", "\"No\" if it is not possible. SAMPLE INPUT 2 3 4 3 2", "flag=1 for i in arr: if total-i <= i: flag=0 break print (\"Yes\"", "range(int(input())): n=int(input()) arr=list(map(int,input().split())) total=sum(arr) flag=1 for i in arr: if total-i <= i:", "arr: if total-i <= i: flag=0 break print (\"Yes\" if flag==1 else \"No\")", "you need to answer whether it is possible to make n sided convex", "line consist of single integer ,second line consist of spaced integers, size of", "SAMPLE OUTPUT Yes No ''' for _ in range(int(input())): n=int(input()) arr=list(map(int,input().split())) total=sum(arr) flag=1", "testcases. For each test case , first line consist of single integer ,second", "2 1 4 SAMPLE OUTPUT Yes No ''' for _ in range(int(input())): n=int(input())", "OUTPUT Yes No ''' for _ in range(int(input())): n=int(input()) arr=list(map(int,input().split())) total=sum(arr) flag=1 for", "For each test case print \"Yes\" if it is possible to make polygon", "size of each side. Output : - For each test case print \"Yes\"", "possible. SAMPLE INPUT 2 3 4 3 2 4 1 2 1 4", "possible to make n sided convex polygon with it. Input : - First", "sided convex polygon with it. Input : - First line contains ,no .of", "if it is not possible. SAMPLE INPUT 2 3 4 3 2 4", "first line consist of single integer ,second line consist of spaced integers, size", "is possible to make polygon or else \"No\" if it is not possible.", "integer ,second line consist of spaced integers, size of each side. Output :", "make polygon or else \"No\" if it is not possible. SAMPLE INPUT 2", "sides, you need to answer whether it is possible to make n sided", "You are given length of n sides, you need to answer whether it", "make n sided convex polygon with it. Input : - First line contains", "n=int(input()) arr=list(map(int,input().split())) total=sum(arr) flag=1 for i in arr: if total-i <= i: flag=0", "side. Output : - For each test case print \"Yes\" if it is", "case print \"Yes\" if it is possible to make polygon or else \"No\"", "to make n sided convex polygon with it. Input : - First line", "with it. Input : - First line contains ,no .of testcases. For each", ": - First line contains ,no .of testcases. For each test case ,", "it. Input : - First line contains ,no .of testcases. For each test", "of spaced integers, size of each side. 
Output : - For each test", "in arr: if total-i <= i: flag=0 break print (\"Yes\" if flag==1 else", "''' for _ in range(int(input())): n=int(input()) arr=list(map(int,input().split())) total=sum(arr) flag=1 for i in arr:", "consist of single integer ,second line consist of spaced integers, size of each", "it is possible to make n sided convex polygon with it. Input :", "not possible. SAMPLE INPUT 2 3 4 3 2 4 1 2 1", "else \"No\" if it is not possible. SAMPLE INPUT 2 3 4 3", "it is not possible. SAMPLE INPUT 2 3 4 3 2 4 1", "case , first line consist of single integer ,second line consist of spaced", "possible to make polygon or else \"No\" if it is not possible. SAMPLE", "consist of spaced integers, size of each side. Output : - For each", "\"Yes\" if it is possible to make polygon or else \"No\" if it", "convex polygon with it. Input : - First line contains ,no .of testcases." ]
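This record shingles a short competitive-programming solution: given n side lengths, decide whether an n-sided convex polygon can be formed. The test is the polygon inequality (every side must be strictly shorter than the sum of the remaining sides), which the code checks as total - i <= i. Reassembled, with the problem statement tidied up:

'''
You are given the lengths of n sides; answer whether an n-sided convex polygon
can be made from them.
Input: the first line contains the number of test cases; each test case has a
single integer n on one line and n space-separated side lengths on the next.
Output: print "Yes" if the polygon is possible, otherwise "No".
Sample input:           Sample output:
2                       Yes
3                       No
4 3 2
4
1 2 1 4
'''
for _ in range(int(input())):
    n = int(input())
    arr = list(map(int, input().split()))
    total = sum(arr)
    flag = 1
    for i in arr:
        if total - i <= i:   # side i is at least as long as all the others combined
            flag = 0
            break
    print("Yes" if flag == 1 else "No")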
[ "shodanKey self.vtKey = vtKey self.vtUser = vtUser self.vtReqPerMin = vtReqPerMin self.disableVT = disableVT", "'urlcharlimit') osintDays = parser.get('Core', 'osintdays') malShareApiKey = parser.get('MalShare', 'apikey') disableMalShare = parser.get('MalShare', 'disable')", "= parser.get('VirusTotal', 'apikey') vtUser = parser.get('VirusTotal', 'username') vtReqPerMin = parser.get('VirusTotal', 'requestsperminute') disableVT =", "parser.get('VirusTotal', 'disable') viperUrlAdd = parser.get('Viper', 'addurl') viperApiToken = parser.get('Viper', 'apitoken') return baseObj(multiProcess, userAgent,", "'disable') viperUrlAdd = parser.get('Viper', 'addurl') viperApiToken = parser.get('Viper', 'apitoken') return baseObj(multiProcess, userAgent, outputFolderName,", "= userAgent self.outputFolderName = outputFolderName self.outputFolder = outputFolder self.deleteOutput = deleteOutput self.dateFormat =", "torIP self.torPort = torPort self.redirectLimit = redirectLimit self.hashCountLimit = hashCountLimit self.urlCharLimit = urlCharLimit", "self.urlCharLimit = urlCharLimit self.osintDays = osintDays self.malShareApiKey = malShareApiKey self.disableMalShare = disableMalShare self.disableOsint", "parser.get('Core', 'usetor') torIP = parser.get('Core', 'torip') torPort = parser.get('Core', 'torport') redirectLimit = parser.get('Core',", "= parser.get('Core', 'deleteoutput') dateFormat = parser.get('Core', 'dateformat') useTor = parser.get('Core', 'usetor') torIP =", "= parser.get('Core', 'dateformat') useTor = parser.get('Core', 'usetor') torIP = parser.get('Core', 'torip') torPort =", "= urlCharLimit self.osintDays = osintDays self.malShareApiKey = malShareApiKey self.disableMalShare = disableMalShare self.disableOsint =", "string class baseObj: def __init__(self, multiProcess, userAgent, outputFolderName, outputFolder, deleteOutput, dateFormat, useTor, torIP,", "from ConfigParser import SafeConfigParser import os import string class baseObj: def __init__(self, multiProcess,", "disableMalShare, disableOsint, otxKey, shodanKey, vtKey, vtUser, vtReqPerMin, disableVT, viperUrlAdd, viperApiToken): self.multiProcess = multiProcess", "'hashcountlimit') urlCharLimit = parser.get('Core', 'urlcharlimit') osintDays = parser.get('Core', 'osintdays') malShareApiKey = parser.get('MalShare', 'apikey')", "disableOsint, otxKey, shodanKey, vtKey, vtUser, vtReqPerMin, disableVT, viperUrlAdd, viperApiToken): self.multiProcess = multiProcess self.userAgent", "= deleteOutput self.dateFormat = dateFormat self.useTor = useTor self.torIP = torIP self.torPort =", "= disableVT self.viperUrlAdd = viperUrlAdd self.viperApiToken = 'Token {0}'.format(viperApiToken) def getBaseConfig(rootDir): parser =", "vtUser, vtReqPerMin, disableVT, viperUrlAdd, viperApiToken): self.multiProcess = multiProcess self.userAgent = userAgent self.outputFolderName =", "= parser.get('OTX', 'apikey') shodanKey = parser.get('Shodan', 'apikey') vtKey = parser.get('VirusTotal', 'apikey') vtUser =", "vtKey self.vtUser = vtUser self.vtReqPerMin = vtReqPerMin self.disableVT = disableVT self.viperUrlAdd = viperUrlAdd", "'apikey') vtUser = parser.get('VirusTotal', 'username') vtReqPerMin = parser.get('VirusTotal', 'requestsperminute') disableVT = parser.get('VirusTotal', 'disable')", "disableVT, viperUrlAdd, viperApiToken): self.multiProcess = multiProcess self.userAgent = userAgent self.outputFolderName = outputFolderName self.outputFolder", "outputFolderName) deleteOutput = parser.get('Core', 'deleteoutput') dateFormat = parser.get('Core', 'dateformat') 
useTor = parser.get('Core', 'usetor')", "= parser.get('Core', 'torip') torPort = parser.get('Core', 'torport') redirectLimit = parser.get('Core', 'redirectlimit') hashCountLimit =", "'deleteoutput') dateFormat = parser.get('Core', 'dateformat') useTor = parser.get('Core', 'usetor') torIP = parser.get('Core', 'torip')", "osintDays self.malShareApiKey = malShareApiKey self.disableMalShare = disableMalShare self.disableOsint = disableOsint self.otxKey = otxKey", "hashCountLimit = parser.get('Core', 'hashcountlimit') urlCharLimit = parser.get('Core', 'urlcharlimit') osintDays = parser.get('Core', 'osintdays') malShareApiKey", "self.vtUser = vtUser self.vtReqPerMin = vtReqPerMin self.disableVT = disableVT self.viperUrlAdd = viperUrlAdd self.viperApiToken", "self.vtReqPerMin = vtReqPerMin self.disableVT = disableVT self.viperUrlAdd = viperUrlAdd self.viperApiToken = 'Token {0}'.format(viperApiToken)", "urlCharLimit, osintDays, malShareApiKey, disableMalShare, disableOsint, otxKey, shodanKey, vtKey, vtUser, vtReqPerMin, disableVT, viperUrlAdd, viperApiToken):", "dateFormat, useTor, torIP, torPort, redirectLimit, hashCountLimit, urlCharLimit, osintDays, malShareApiKey, disableMalShare, disableOsint, otxKey, shodanKey,", "self.deleteOutput = deleteOutput self.dateFormat = dateFormat self.useTor = useTor self.torIP = torIP self.torPort", "viperApiToken = parser.get('Viper', 'apitoken') return baseObj(multiProcess, userAgent, outputFolderName, outputFolder, deleteOutput, dateFormat, useTor, torIP,", "= parser.get('MalShare', 'apikey') disableMalShare = parser.get('MalShare', 'disable') disableOsint = parser.get('OSINT', 'disable') otxKey =", "self.disableMalShare = disableMalShare self.disableOsint = disableOsint self.otxKey = otxKey self.shodanKey = shodanKey self.vtKey", "'config', 'settings.conf')) multiProcess = parser.get('Core', 'multiprocess') userAgent = parser.get('Core', 'useragent') outputFolderName = parser.get('Core',", "= multiProcess self.userAgent = userAgent self.outputFolderName = outputFolderName self.outputFolder = outputFolder self.deleteOutput =", "= otxKey self.shodanKey = shodanKey self.vtKey = vtKey self.vtUser = vtUser self.vtReqPerMin =", "= parser.get('Core', 'multiprocess') userAgent = parser.get('Core', 'useragent') outputFolderName = parser.get('Core', 'outputfolder') outputFolder =", "'torport') redirectLimit = parser.get('Core', 'redirectlimit') hashCountLimit = parser.get('Core', 'hashcountlimit') urlCharLimit = parser.get('Core', 'urlcharlimit')", "disableOsint = parser.get('OSINT', 'disable') otxKey = parser.get('OTX', 'apikey') shodanKey = parser.get('Shodan', 'apikey') vtKey", "baseObj: def __init__(self, multiProcess, userAgent, outputFolderName, outputFolder, deleteOutput, dateFormat, useTor, torIP, torPort, redirectLimit,", "= redirectLimit self.hashCountLimit = hashCountLimit self.urlCharLimit = urlCharLimit self.osintDays = osintDays self.malShareApiKey =", "'disable') disableOsint = parser.get('OSINT', 'disable') otxKey = parser.get('OTX', 'apikey') shodanKey = parser.get('Shodan', 'apikey')", "= parser.get('Core', 'redirectlimit') hashCountLimit = parser.get('Core', 'hashcountlimit') urlCharLimit = parser.get('Core', 'urlcharlimit') osintDays =", "'settings.conf')) multiProcess = parser.get('Core', 'multiprocess') userAgent = parser.get('Core', 'useragent') outputFolderName = parser.get('Core', 'outputfolder')", "parser.read(os.path.join(rootDir, 'config', 'settings.conf')) multiProcess = parser.get('Core', 'multiprocess') userAgent = parser.get('Core', 
'useragent') outputFolderName =", "redirectLimit = parser.get('Core', 'redirectlimit') hashCountLimit = parser.get('Core', 'hashcountlimit') urlCharLimit = parser.get('Core', 'urlcharlimit') osintDays", "'apikey') disableMalShare = parser.get('MalShare', 'disable') disableOsint = parser.get('OSINT', 'disable') otxKey = parser.get('OTX', 'apikey')", "disableOsint self.otxKey = otxKey self.shodanKey = shodanKey self.vtKey = vtKey self.vtUser = vtUser", "'outputfolder') outputFolder = os.path.join(rootDir, outputFolderName) deleteOutput = parser.get('Core', 'deleteoutput') dateFormat = parser.get('Core', 'dateformat')", "'username') vtReqPerMin = parser.get('VirusTotal', 'requestsperminute') disableVT = parser.get('VirusTotal', 'disable') viperUrlAdd = parser.get('Viper', 'addurl')", "self.hashCountLimit = hashCountLimit self.urlCharLimit = urlCharLimit self.osintDays = osintDays self.malShareApiKey = malShareApiKey self.disableMalShare", "import SafeConfigParser import os import string class baseObj: def __init__(self, multiProcess, userAgent, outputFolderName,", "self.useTor = useTor self.torIP = torIP self.torPort = torPort self.redirectLimit = redirectLimit self.hashCountLimit", "self.osintDays = osintDays self.malShareApiKey = malShareApiKey self.disableMalShare = disableMalShare self.disableOsint = disableOsint self.otxKey", "parser.get('Viper', 'addurl') viperApiToken = parser.get('Viper', 'apitoken') return baseObj(multiProcess, userAgent, outputFolderName, outputFolder, deleteOutput, dateFormat,", "= parser.get('VirusTotal', 'requestsperminute') disableVT = parser.get('VirusTotal', 'disable') viperUrlAdd = parser.get('Viper', 'addurl') viperApiToken =", "= parser.get('Viper', 'addurl') viperApiToken = parser.get('Viper', 'apitoken') return baseObj(multiProcess, userAgent, outputFolderName, outputFolder, deleteOutput,", "userAgent = parser.get('Core', 'useragent') outputFolderName = parser.get('Core', 'outputfolder') outputFolder = os.path.join(rootDir, outputFolderName) deleteOutput", "os.path.join(rootDir, outputFolderName) deleteOutput = parser.get('Core', 'deleteoutput') dateFormat = parser.get('Core', 'dateformat') useTor = parser.get('Core',", "= parser.get('Core', 'urlcharlimit') osintDays = parser.get('Core', 'osintdays') malShareApiKey = parser.get('MalShare', 'apikey') disableMalShare =", "self.userAgent = userAgent self.outputFolderName = outputFolderName self.outputFolder = outputFolder self.deleteOutput = deleteOutput self.dateFormat", "outputFolder = os.path.join(rootDir, outputFolderName) deleteOutput = parser.get('Core', 'deleteoutput') dateFormat = parser.get('Core', 'dateformat') useTor", "= shodanKey self.vtKey = vtKey self.vtUser = vtUser self.vtReqPerMin = vtReqPerMin self.disableVT =", "viperUrlAdd = parser.get('Viper', 'addurl') viperApiToken = parser.get('Viper', 'apitoken') return baseObj(multiProcess, userAgent, outputFolderName, outputFolder,", "self.viperUrlAdd = viperUrlAdd self.viperApiToken = 'Token {0}'.format(viperApiToken) def getBaseConfig(rootDir): parser = SafeConfigParser() parser.read(os.path.join(rootDir,", "torIP = parser.get('Core', 'torip') torPort = parser.get('Core', 'torport') redirectLimit = parser.get('Core', 'redirectlimit') hashCountLimit", "disableMalShare = parser.get('MalShare', 'disable') disableOsint = parser.get('OSINT', 'disable') otxKey = parser.get('OTX', 'apikey') shodanKey", "urlCharLimit, osintDays, malShareApiKey, disableMalShare, disableOsint, otxKey, shodanKey, vtKey, vtUser, vtReqPerMin, disableVT, viperUrlAdd, 
viperApiToken)", "class baseObj: def __init__(self, multiProcess, userAgent, outputFolderName, outputFolder, deleteOutput, dateFormat, useTor, torIP, torPort,", "disableVT self.viperUrlAdd = viperUrlAdd self.viperApiToken = 'Token {0}'.format(viperApiToken) def getBaseConfig(rootDir): parser = SafeConfigParser()", "shodanKey = parser.get('Shodan', 'apikey') vtKey = parser.get('VirusTotal', 'apikey') vtUser = parser.get('VirusTotal', 'username') vtReqPerMin", "self.disableOsint = disableOsint self.otxKey = otxKey self.shodanKey = shodanKey self.vtKey = vtKey self.vtUser", "= parser.get('MalShare', 'disable') disableOsint = parser.get('OSINT', 'disable') otxKey = parser.get('OTX', 'apikey') shodanKey =", "userAgent self.outputFolderName = outputFolderName self.outputFolder = outputFolder self.deleteOutput = deleteOutput self.dateFormat = dateFormat", "vtReqPerMin = parser.get('VirusTotal', 'requestsperminute') disableVT = parser.get('VirusTotal', 'disable') viperUrlAdd = parser.get('Viper', 'addurl') viperApiToken", "deleteOutput self.dateFormat = dateFormat self.useTor = useTor self.torIP = torIP self.torPort = torPort", "SafeConfigParser() parser.read(os.path.join(rootDir, 'config', 'settings.conf')) multiProcess = parser.get('Core', 'multiprocess') userAgent = parser.get('Core', 'useragent') outputFolderName", "parser.get('VirusTotal', 'requestsperminute') disableVT = parser.get('VirusTotal', 'disable') viperUrlAdd = parser.get('Viper', 'addurl') viperApiToken = parser.get('Viper',", "def __init__(self, multiProcess, userAgent, outputFolderName, outputFolder, deleteOutput, dateFormat, useTor, torIP, torPort, redirectLimit, hashCountLimit,", "self.viperApiToken = 'Token {0}'.format(viperApiToken) def getBaseConfig(rootDir): parser = SafeConfigParser() parser.read(os.path.join(rootDir, 'config', 'settings.conf')) multiProcess", "parser.get('Core', 'dateformat') useTor = parser.get('Core', 'usetor') torIP = parser.get('Core', 'torip') torPort = parser.get('Core',", "urlCharLimit = parser.get('Core', 'urlcharlimit') osintDays = parser.get('Core', 'osintdays') malShareApiKey = parser.get('MalShare', 'apikey') disableMalShare", "self.disableVT = disableVT self.viperUrlAdd = viperUrlAdd self.viperApiToken = 'Token {0}'.format(viperApiToken) def getBaseConfig(rootDir): parser", "'dateformat') useTor = parser.get('Core', 'usetor') torIP = parser.get('Core', 'torip') torPort = parser.get('Core', 'torport')", "parser.get('Core', 'hashcountlimit') urlCharLimit = parser.get('Core', 'urlcharlimit') osintDays = parser.get('Core', 'osintdays') malShareApiKey = parser.get('MalShare',", "multiProcess = parser.get('Core', 'multiprocess') userAgent = parser.get('Core', 'useragent') outputFolderName = parser.get('Core', 'outputfolder') outputFolder", "= malShareApiKey self.disableMalShare = disableMalShare self.disableOsint = disableOsint self.otxKey = otxKey self.shodanKey =", "torPort = parser.get('Core', 'torport') redirectLimit = parser.get('Core', 'redirectlimit') hashCountLimit = parser.get('Core', 'hashcountlimit') urlCharLimit", "parser.get('Core', 'urlcharlimit') osintDays = parser.get('Core', 'osintdays') malShareApiKey = parser.get('MalShare', 'apikey') disableMalShare = parser.get('MalShare',", "dateFormat = parser.get('Core', 'dateformat') useTor = parser.get('Core', 'usetor') torIP = parser.get('Core', 'torip') torPort", "vtReqPerMin, disableVT, viperUrlAdd, viperApiToken): self.multiProcess = multiProcess self.userAgent = userAgent self.outputFolderName = outputFolderName", "= vtReqPerMin 
self.disableVT = disableVT self.viperUrlAdd = viperUrlAdd self.viperApiToken = 'Token {0}'.format(viperApiToken) def", "parser.get('MalShare', 'apikey') disableMalShare = parser.get('MalShare', 'disable') disableOsint = parser.get('OSINT', 'disable') otxKey = parser.get('OTX',", "useTor self.torIP = torIP self.torPort = torPort self.redirectLimit = redirectLimit self.hashCountLimit = hashCountLimit", "= parser.get('Core', 'hashcountlimit') urlCharLimit = parser.get('Core', 'urlcharlimit') osintDays = parser.get('Core', 'osintdays') malShareApiKey =", "useTor = parser.get('Core', 'usetor') torIP = parser.get('Core', 'torip') torPort = parser.get('Core', 'torport') redirectLimit", "shodanKey, vtKey, vtUser, vtReqPerMin, disableVT, viperUrlAdd, viperApiToken): self.multiProcess = multiProcess self.userAgent = userAgent", "parser.get('Core', 'useragent') outputFolderName = parser.get('Core', 'outputfolder') outputFolder = os.path.join(rootDir, outputFolderName) deleteOutput = parser.get('Core',", "malShareApiKey, disableMalShare, disableOsint, otxKey, shodanKey, vtKey, vtUser, vtReqPerMin, disableVT, viperUrlAdd, viperApiToken): self.multiProcess =", "self.torIP = torIP self.torPort = torPort self.redirectLimit = redirectLimit self.hashCountLimit = hashCountLimit self.urlCharLimit", "= vtUser self.vtReqPerMin = vtReqPerMin self.disableVT = disableVT self.viperUrlAdd = viperUrlAdd self.viperApiToken =", "deleteOutput, dateFormat, useTor, torIP, torPort, redirectLimit, hashCountLimit, urlCharLimit, osintDays, malShareApiKey, disableMalShare, disableOsint, otxKey,", "parser.get('Core', 'multiprocess') userAgent = parser.get('Core', 'useragent') outputFolderName = parser.get('Core', 'outputfolder') outputFolder = os.path.join(rootDir,", "'usetor') torIP = parser.get('Core', 'torip') torPort = parser.get('Core', 'torport') redirectLimit = parser.get('Core', 'redirectlimit')", "= 'Token {0}'.format(viperApiToken) def getBaseConfig(rootDir): parser = SafeConfigParser() parser.read(os.path.join(rootDir, 'config', 'settings.conf')) multiProcess =", "parser = SafeConfigParser() parser.read(os.path.join(rootDir, 'config', 'settings.conf')) multiProcess = parser.get('Core', 'multiprocess') userAgent = parser.get('Core',", "self.dateFormat = dateFormat self.useTor = useTor self.torIP = torIP self.torPort = torPort self.redirectLimit", "parser.get('Viper', 'apitoken') return baseObj(multiProcess, userAgent, outputFolderName, outputFolder, deleteOutput, dateFormat, useTor, torIP, torPort, redirectLimit,", "= parser.get('Core', 'useragent') outputFolderName = parser.get('Core', 'outputfolder') outputFolder = os.path.join(rootDir, outputFolderName) deleteOutput =", "getBaseConfig(rootDir): parser = SafeConfigParser() parser.read(os.path.join(rootDir, 'config', 'settings.conf')) multiProcess = parser.get('Core', 'multiprocess') userAgent =", "'apikey') shodanKey = parser.get('Shodan', 'apikey') vtKey = parser.get('VirusTotal', 'apikey') vtUser = parser.get('VirusTotal', 'username')", "userAgent, outputFolderName, outputFolder, deleteOutput, dateFormat, useTor, torIP, torPort, redirectLimit, hashCountLimit, urlCharLimit, osintDays, malShareApiKey,", "malShareApiKey self.disableMalShare = disableMalShare self.disableOsint = disableOsint self.otxKey = otxKey self.shodanKey = shodanKey", "def getBaseConfig(rootDir): parser = SafeConfigParser() parser.read(os.path.join(rootDir, 'config', 'settings.conf')) multiProcess = parser.get('Core', 'multiprocess') userAgent", "osintDays, malShareApiKey, disableMalShare, 
disableOsint, otxKey, shodanKey, vtKey, vtUser, vtReqPerMin, disableVT, viperUrlAdd, viperApiToken): self.multiProcess", "parser.get('VirusTotal', 'apikey') vtUser = parser.get('VirusTotal', 'username') vtReqPerMin = parser.get('VirusTotal', 'requestsperminute') disableVT = parser.get('VirusTotal',", "self.multiProcess = multiProcess self.userAgent = userAgent self.outputFolderName = outputFolderName self.outputFolder = outputFolder self.deleteOutput", "= parser.get('Core', 'torport') redirectLimit = parser.get('Core', 'redirectlimit') hashCountLimit = parser.get('Core', 'hashcountlimit') urlCharLimit =", "outputFolderName, outputFolder, deleteOutput, dateFormat, useTor, torIP, torPort, redirectLimit, hashCountLimit, urlCharLimit, osintDays, malShareApiKey, disableMalShare,", "vtUser self.vtReqPerMin = vtReqPerMin self.disableVT = disableVT self.viperUrlAdd = viperUrlAdd self.viperApiToken = 'Token", "parser.get('Core', 'redirectlimit') hashCountLimit = parser.get('Core', 'hashcountlimit') urlCharLimit = parser.get('Core', 'urlcharlimit') osintDays = parser.get('Core',", "self.redirectLimit = redirectLimit self.hashCountLimit = hashCountLimit self.urlCharLimit = urlCharLimit self.osintDays = osintDays self.malShareApiKey", "= parser.get('Core', 'osintdays') malShareApiKey = parser.get('MalShare', 'apikey') disableMalShare = parser.get('MalShare', 'disable') disableOsint =", "ConfigParser import SafeConfigParser import os import string class baseObj: def __init__(self, multiProcess, userAgent,", "import os import string class baseObj: def __init__(self, multiProcess, userAgent, outputFolderName, outputFolder, deleteOutput,", "vtKey, vtUser, vtReqPerMin, disableVT, viperUrlAdd, viperApiToken): self.multiProcess = multiProcess self.userAgent = userAgent self.outputFolderName", "redirectLimit self.hashCountLimit = hashCountLimit self.urlCharLimit = urlCharLimit self.osintDays = osintDays self.malShareApiKey = malShareApiKey", "osintDays = parser.get('Core', 'osintdays') malShareApiKey = parser.get('MalShare', 'apikey') disableMalShare = parser.get('MalShare', 'disable') disableOsint", "vtUser = parser.get('VirusTotal', 'username') vtReqPerMin = parser.get('VirusTotal', 'requestsperminute') disableVT = parser.get('VirusTotal', 'disable') viperUrlAdd", "parser.get('Core', 'torport') redirectLimit = parser.get('Core', 'redirectlimit') hashCountLimit = parser.get('Core', 'hashcountlimit') urlCharLimit = parser.get('Core',", "self.outputFolderName = outputFolderName self.outputFolder = outputFolder self.deleteOutput = deleteOutput self.dateFormat = dateFormat self.useTor", "= disableOsint self.otxKey = otxKey self.shodanKey = shodanKey self.vtKey = vtKey self.vtUser =", "disableVT = parser.get('VirusTotal', 'disable') viperUrlAdd = parser.get('Viper', 'addurl') viperApiToken = parser.get('Viper', 'apitoken') return", "parser.get('Core', 'deleteoutput') dateFormat = parser.get('Core', 'dateformat') useTor = parser.get('Core', 'usetor') torIP = parser.get('Core',", "= outputFolder self.deleteOutput = deleteOutput self.dateFormat = dateFormat self.useTor = useTor self.torIP =", "parser.get('Core', 'outputfolder') outputFolder = os.path.join(rootDir, outputFolderName) deleteOutput = parser.get('Core', 'deleteoutput') dateFormat = parser.get('Core',", "= torPort self.redirectLimit = redirectLimit self.hashCountLimit = hashCountLimit self.urlCharLimit = urlCharLimit self.osintDays =", "{0}'.format(viperApiToken) def getBaseConfig(rootDir): parser = SafeConfigParser() parser.read(os.path.join(rootDir, 
'config', 'settings.conf')) multiProcess = parser.get('Core', 'multiprocess')", "= parser.get('Core', 'usetor') torIP = parser.get('Core', 'torip') torPort = parser.get('Core', 'torport') redirectLimit =", "'addurl') viperApiToken = parser.get('Viper', 'apitoken') return baseObj(multiProcess, userAgent, outputFolderName, outputFolder, deleteOutput, dateFormat, useTor,", "= dateFormat self.useTor = useTor self.torIP = torIP self.torPort = torPort self.redirectLimit =", "multiProcess self.userAgent = userAgent self.outputFolderName = outputFolderName self.outputFolder = outputFolder self.deleteOutput = deleteOutput", "urlCharLimit self.osintDays = osintDays self.malShareApiKey = malShareApiKey self.disableMalShare = disableMalShare self.disableOsint = disableOsint", "= parser.get('OSINT', 'disable') otxKey = parser.get('OTX', 'apikey') shodanKey = parser.get('Shodan', 'apikey') vtKey =", "#!/usr/bin/python from ConfigParser import SafeConfigParser import os import string class baseObj: def __init__(self,", "self.outputFolder = outputFolder self.deleteOutput = deleteOutput self.dateFormat = dateFormat self.useTor = useTor self.torIP", "torPort, redirectLimit, hashCountLimit, urlCharLimit, osintDays, malShareApiKey, disableMalShare, disableOsint, otxKey, shodanKey, vtKey, vtUser, vtReqPerMin,", "dateFormat self.useTor = useTor self.torIP = torIP self.torPort = torPort self.redirectLimit = redirectLimit", "torPort self.redirectLimit = redirectLimit self.hashCountLimit = hashCountLimit self.urlCharLimit = urlCharLimit self.osintDays = osintDays", "deleteOutput = parser.get('Core', 'deleteoutput') dateFormat = parser.get('Core', 'dateformat') useTor = parser.get('Core', 'usetor') torIP", "useTor, torIP, torPort, redirectLimit, hashCountLimit, urlCharLimit, osintDays, malShareApiKey, disableMalShare, disableOsint, otxKey, shodanKey, vtKey,", "otxKey = parser.get('OTX', 'apikey') shodanKey = parser.get('Shodan', 'apikey') vtKey = parser.get('VirusTotal', 'apikey') vtUser", "redirectLimit, hashCountLimit, urlCharLimit, osintDays, malShareApiKey, disableMalShare, disableOsint, otxKey, shodanKey, vtKey, vtUser, vtReqPerMin, disableVT,", "self.shodanKey = shodanKey self.vtKey = vtKey self.vtUser = vtUser self.vtReqPerMin = vtReqPerMin self.disableVT", "vtKey = parser.get('VirusTotal', 'apikey') vtUser = parser.get('VirusTotal', 'username') vtReqPerMin = parser.get('VirusTotal', 'requestsperminute') disableVT", "'apitoken') return baseObj(multiProcess, userAgent, outputFolderName, outputFolder, deleteOutput, dateFormat, useTor, torIP, torPort, redirectLimit, hashCountLimit,", "SafeConfigParser import os import string class baseObj: def __init__(self, multiProcess, userAgent, outputFolderName, outputFolder,", "'Token {0}'.format(viperApiToken) def getBaseConfig(rootDir): parser = SafeConfigParser() parser.read(os.path.join(rootDir, 'config', 'settings.conf')) multiProcess = parser.get('Core',", "return baseObj(multiProcess, userAgent, outputFolderName, outputFolder, deleteOutput, dateFormat, useTor, torIP, torPort, redirectLimit, hashCountLimit, urlCharLimit,", "baseObj(multiProcess, userAgent, outputFolderName, outputFolder, deleteOutput, dateFormat, useTor, torIP, torPort, redirectLimit, hashCountLimit, urlCharLimit, osintDays,", "parser.get('OTX', 'apikey') shodanKey = parser.get('Shodan', 'apikey') vtKey = parser.get('VirusTotal', 'apikey') vtUser = parser.get('VirusTotal',", "= viperUrlAdd self.viperApiToken = 'Token {0}'.format(viperApiToken) def getBaseConfig(rootDir): parser = SafeConfigParser() 
import os
import string

# Import reconstructed from usage below; the original import line is not part of
# the recovered fragments.
from configparser import SafeConfigParser


class baseObj:
    def __init__(self, multiProcess, userAgent, outputFolderName, outputFolder, deleteOutput, dateFormat,
                 useTor, torIP, torPort, redirectLimit, hashCountLimit, urlCharLimit, osintDays,
                 malShareApiKey, disableMalShare, disableOsint, otxKey, shodanKey, vtKey, vtUser,
                 vtReqPerMin, disableVT, viperUrlAdd, viperApiToken):
        self.multiProcess = multiProcess
        self.userAgent = userAgent
        self.outputFolderName = outputFolderName
        self.outputFolder = outputFolder
        self.deleteOutput = deleteOutput
        self.dateFormat = dateFormat
        self.useTor = useTor
        self.torIP = torIP
        self.torPort = torPort
        self.redirectLimit = redirectLimit
        self.hashCountLimit = hashCountLimit
        self.urlCharLimit = urlCharLimit
        self.osintDays = osintDays
        self.malShareApiKey = malShareApiKey
        self.disableMalShare = disableMalShare
        self.disableOsint = disableOsint
        self.otxKey = otxKey
        self.shodanKey = shodanKey
        self.vtKey = vtKey
        self.vtUser = vtUser
        self.vtReqPerMin = vtReqPerMin
        self.disableVT = disableVT
        self.viperUrlAdd = viperUrlAdd
        self.viperApiToken = 'Token {0}'.format(viperApiToken)


def getBaseConfig(rootDir):
    parser = SafeConfigParser()
    parser.read(os.path.join(rootDir, 'config', 'settings.conf'))
    multiProcess = parser.get('Core', 'multiprocess')
    userAgent = parser.get('Core', 'useragent')
    outputFolderName = parser.get('Core', 'outputfolder')
    outputFolder = os.path.join(rootDir, outputFolderName)
    deleteOutput = parser.get('Core', 'deleteoutput')
    dateFormat = parser.get('Core', 'dateformat')
    useTor = parser.get('Core', 'usetor')  # option name assumed; not fully recoverable from the fragments
    torIP = parser.get('Core', 'torip')
    torPort = parser.get('Core', 'torport')
    redirectLimit = parser.get('Core', 'redirectlimit')
    hashCountLimit = parser.get('Core', 'hashcountlimit')
    urlCharLimit = parser.get('Core', 'urlcharlimit')
    osintDays = parser.get('Core', 'osintdays')
    malShareApiKey = parser.get('MalShare', 'apikey')
    disableMalShare = parser.get('MalShare', 'disable')
    disableOsint = parser.get('OSINT', 'disable')
    otxKey = parser.get('OTX', 'apikey')
    shodanKey = parser.get('Shodan', 'apikey')
    vtKey = parser.get('VirusTotal', 'apikey')
    vtUser = parser.get('VirusTotal', 'username')
    vtReqPerMin = parser.get('VirusTotal', 'requestsperminute')
    disableVT = parser.get('VirusTotal', 'disable')
    viperUrlAdd = parser.get('Viper', 'addurl')
    viperApiToken = parser.get('Viper', 'apitoken')
    return baseObj(multiProcess, userAgent, outputFolderName, outputFolder, deleteOutput, dateFormat,
                   useTor, torIP, torPort, redirectLimit, hashCountLimit, urlCharLimit, osintDays,
                   malShareApiKey, disableMalShare, disableOsint, otxKey, shodanKey, vtKey, vtUser,
                   vtReqPerMin, disableVT, viperUrlAdd, viperApiToken)
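# --- Hypothetical usage sketch (added for illustration; not part of the recovered
# fragments). getBaseConfig expects <rootDir>/config/settings.conf to provide the
# sections read above: [Core], [MalShare], [OSINT], [OTX], [Shodan], [VirusTotal]
# and [Viper]. The rootDir value below is an assumption.
if __name__ == '__main__':
    baseConfig = getBaseConfig(os.path.dirname(os.path.abspath(__file__)))
    print(baseConfig.outputFolder, baseConfig.userAgent)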
<gh_stars>1-10
import os

from tqdm import tqdm

"""
Ubuntu Dialogue Corpus
http://arxiv.org/abs/1506.08909
"""


class UbuntuData:
    """
    """

    def __init__(self, dirName):
        """
        Args:
            dirName (string): directory where to load the corpus
        """
        self.MAX_NUMBER_SUBDIR = 10
        self.conversations = []
        __dir = os.path.join(dirName, "dialogs")
        number_subdir = 0
        for sub in tqdm(os.scandir(__dir), desc="Ubuntu dialogs subfolders", total=len(os.listdir(__dir))):
            if number_subdir == self.MAX_NUMBER_SUBDIR:
                print("WARNING: Early stopping, only extracting {} directories".format(self.MAX_NUMBER_SUBDIR))
                return

            if sub.is_dir():
                number_subdir += 1
                for f in os.scandir(sub.path):
                    if f.name.endswith(".tsv"):
                        self.conversations.append({"lines": self.loadLines(f.path)})

    def loadLines(self, fileName):
        """
        Args:
            fileName (str): file to load
        Return:
            list<dict<str>>: the extracted fields for each line
        """
        lines = []
        with open(fileName, 'r') as f:
            for line in f:
                l = line[line.rindex("\t") + 1:].strip()  # Strip metadata (timestamps, speaker names)
                lines.append({"text": l})
        return lines

    def getConversations(self):
        return self.conversations
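# --- Hypothetical usage sketch (not part of the recovered file). The path below is
# an assumption; it should point at a corpus root containing a "dialogs" directory
# of .tsv conversation files.
if __name__ == "__main__":
    corpus = UbuntuData("data/ubuntu")
    print("{} conversations loaded".format(len(corpus.getConversations())))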
<filename>yacluster.py
from functools import partial, reduce
from itertools import chain, product
from math import sqrt


def distance(coords_1, coords_2):
    return sqrt(pow(coords_1[0] - coords_2[0], 2) + pow(coords_1[1] - coords_2[1], 2))


def get_grid_cell(x, y, threshold):
    return (int(x // threshold), int(y // threshold))


def get_nearby_grid_cells(grid_cell):
    grid_x, grid_y = grid_cell
    return [(i, j)
            for i, j in product(range(grid_x - 1, grid_x + 2), range(grid_y - 1, grid_y + 2))]


def cluster_iter(clustered, point, threshold):
    """Add a point to a grid-like cluster structure.

    This allows comparing point distances only to clusters from nearby grids,
    not to all clusters.  Useful when there are many clusters expected."""
    coords, object_ = point
    point_grid_cell = get_grid_cell(*coords, threshold=threshold)
    nearby_grid_cells = get_nearby_grid_cells(point_grid_cell)
    possible_nearby_cluster_locations = chain(
        *[(location for location in clustered.get(grid_cell, {}))
          for grid_cell in nearby_grid_cells]
    )

    def nearest_location(champion_with_distance, pretender, coords=coords, threshold=threshold):
        pretender_distance = distance(coords, pretender)
        if pretender_distance < threshold:
            if champion_with_distance and champion_with_distance[1] <= pretender_distance:
                return champion_with_distance
            else:
                return (pretender, pretender_distance)
        else:
            return champion_with_distance

    nearest_cluster_with_distance = reduce(nearest_location, possible_nearby_cluster_locations, None)
    if nearest_cluster_with_distance:
        nearest_cluster_location, _nearest_cluster_distance = nearest_cluster_with_distance
    else:
        nearest_cluster_location = None

    if nearest_cluster_location:
        cluster_grid_cell = get_grid_cell(*nearest_cluster_location, threshold=threshold)
        cluster = clustered[cluster_grid_cell].pop(nearest_cluster_location)
        cluster_object_count = len(cluster)
        new_cluster_location = (
            (nearest_cluster_location[0] * cluster_object_count + coords[0]) / (cluster_object_count + 1),
            (nearest_cluster_location[1] * cluster_object_count + coords[1]) / (cluster_object_count + 1),
        )
    else:
        cluster = []
        new_cluster_location = coords

    cluster.append(point)
    new_cluster_grid_cell = get_grid_cell(*new_cluster_location, threshold=threshold)
    clustered.setdefault(new_cluster_grid_cell, {})
    clustered[new_cluster_grid_cell][new_cluster_location] = cluster
    return clustered


def cluster(points, threshold):
    """Cluster points using distance-based clustering algorithm.

    Arguments:
    points — an iterable of two-element point tuples, each containing:
    • a two-element tuple with X and Y coordinates,
    • the actual object being clustered;
    threshold — if a point is included into a cluster, it must be closer to
    its centroid than this value.

    Return value: an iterable of two-element cluster tuples, each containing:
    • a two-element tuple with X and Y coordinates of the cluster centroid;
    • a list of objects belonging to the cluster.

    Cluster’s centroid is defined as average coordinates of the cluster’s members.
    """
    cluster_iter_for_threshold = partial(cluster_iter, threshold=threshold)
    clustered = reduce(cluster_iter_for_threshold, points, {})
    return chain(
        *[((location, [object_ for coords, object_ in points])
           for location, points in grid_clusters.items())
          for grid_clusters in clustered.values()]
    )
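# --- Hypothetical usage sketch (not part of the original module). Each point is a
# ((x, y), payload) tuple; threshold is the maximum distance from a point to the
# centroid of the cluster it joins.
if __name__ == '__main__':
    points = [((1.0, 1.0), 'a'), ((1.2, 0.9), 'b'), ((10.0, 10.0), 'c')]
    for centroid, members in cluster(points, threshold=2.0):
        print(centroid, members)
    # Expected: one cluster near (1.1, 0.95) holding ['a', 'b'] and one at
    # (10.0, 10.0) holding ['c'].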
"""Problem 2:
Given an array of integers, replace each element of the array with product of
every other element in the array without using the division operator.

Example input 1: [1, 2, 3, 4, 5]
=> should return output: [120, 60, 40, 30, 24]

Example input 2: [5, 3, 4, 2, 6, 8]
=> should return output: [1152, 1920, 1440, 2880, 960, 720]
"""


# slicing the array arr[0:self-1]+arr[self:]
# start two pointers at the current num we want to ignore
# and expand the pointer to the sides
def product_no_self(numbers):
    start = 0
    end = 0
    index = 0
    pro = 1
    output = []
    # bound check
    while index < len(numbers):  # [1, 2, 3, 4, 5] O(n)
        curr = numbers[index]
        start = index - 1  # 2, 1
        end = index + 1    # 4, 5
        pro = 1
        while start > -1 or end < len(numbers):  # O(n)
            if start > -1:
                pro *= numbers[start]
                start -= 1
            if end < len(numbers):
                pro *= numbers[end]
                end += 1
        output.append(pro)
        index += 1
    return output


numbers = [1, 2, 3, 4, 5]
# space is O(n)
# run time is O(n^2)
Example input 1: [1,", "slicing the array arr[0:self-1]+arr[self:] # start two pointers at the current num we", "4, 5] = > should return output: [120, 60, 40, 30, 24] Example", "1: [1, 2, 3, 4, 5] = > should return output: [120, 60,", "every other element in the array without using the division operator. Example input", "[] # bound check while index < len(numbers): # [1, 2, 3, 4,", "# slicing the array arr[0:self-1]+arr[self:] # start two pointers at the current num", "array arr[0:self-1]+arr[self:] # start two pointers at the current num we want to", "<filename>mock_interview/spring_mock_at_make_school.py<gh_stars>0 \"\"\"Problem 2: Given an array of integers, replace each element of the", "5] = > should return output: [120, 60, 40, 30, 24] Example input", "array of integers, replace each element of the array with product of every", "[120, 60, 40, 30, 24] Example input 2: [5, 3, 4, 2, 6,", "the pointer to the sides def product_no_self(numbers): start = 0 end = 0", "1 end = index + 1 # 4, 5 pro = 1 while", "each element of the array with product of every other element in the", "output numbers = [1, 2, 3, 4, 5] # space is O(n) #", "3, 4, 5] # space is O(n) # run time is O(n^2) print(product_no_self(numbers))", "30, 24] Example input 2: [5, 3, 4, 2, 6, 8] = >", "< len(numbers): # O(n) if start > -1: pro *= numbers[start] start -=", "1440, 2880, 960, 720] \"\"\" # slicing the array arr[0:self-1]+arr[self:] # start two", "expand the pointer to the sides def product_no_self(numbers): start = 0 end =", "- 1 # 2, 1 end = index + 1 # 4, 5", "output: [120, 60, 40, 30, 24] Example input 2: [5, 3, 4, 2,", "8] = > should return output: [1152, 1920, 1440, 2880, 960, 720] \"\"\"", "0 end = 0 index = 0 pro = 1 output = []", "1 output = [] # bound check while index < len(numbers): # [1,", "960, 720] \"\"\" # slicing the array arr[0:self-1]+arr[self:] # start two pointers at", "pro = 1 output = [] # bound check while index < len(numbers):", "an array of integers, replace each element of the array with product of", "the array arr[0:self-1]+arr[self:] # start two pointers at the current num we want", "= 1 output = [] # bound check while index < len(numbers): #", "# [1, 2, 3, 4, 5] O(n) curr = numbers[index] start = index", "arr[0:self-1]+arr[self:] # start two pointers at the current num we want to ignore", "# bound check while index < len(numbers): # [1, 2, 3, 4, 5]", "if start > -1: pro *= numbers[start] start -= 1 if end <", "= > should return output: [1152, 1920, 1440, 2880, 960, 720] \"\"\" #", "of every other element in the array without using the division operator. Example", "ignore and expand the pointer to the sides def product_no_self(numbers): start = 0", "1920, 1440, 2880, 960, 720] \"\"\" # slicing the array arr[0:self-1]+arr[self:] # start", "output.append(pro) index += 1 return output numbers = [1, 2, 3, 4, 5]", "return output numbers = [1, 2, 3, 4, 5] # space is O(n)", "= 1 while start > -1 or end < len(numbers): # O(n) if", "while start > -1 or end < len(numbers): # O(n) if start >", "array without using the division operator. Example input 1: [1, 2, 3, 4," ]
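The n-grams in the list above were shingled from a short interview-prep script: product-of-array-except-self, solved without the division operator by expanding two pointers outward from the index being skipped. Below is a reconstruction of that product_no_self function assembled from the fragments; names, comments, and the driver call are taken from the fragments themselves, so treat it as a sketch of the quoted snippet rather than a vetted reference solution.

def product_no_self(numbers):
    # For each index, multiply everything to its left and right,
    # skipping the current element itself.
    output = []
    index = 0
    while index < len(numbers):                 # O(n) outer loop
        start = index - 1                       # walks left from the skipped index
        end = index + 1                         # walks right from the skipped index
        pro = 1
        while start > -1 or end < len(numbers):  # O(n) inner loop
            if start > -1:
                pro *= numbers[start]
                start -= 1
            if end < len(numbers):
                pro *= numbers[end]
                end += 1
        output.append(pro)
        index += 1
    return output

numbers = [1, 2, 3, 4, 5]
print(product_no_self(numbers))  # [120, 60, 40, 30, 24]

As the trailing comments in the fragments note, this runs in O(n^2) time with O(n) output space; a prefix/suffix product pass over the array would reach O(n) time while still avoiding the division operator.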
[ "def test_store_host(): from dallinger.command_line.config import get_configured_hosts from dallinger.command_line.config import store_host host1 = dict(user=\"test_user_1\",", "each test\"\"\" tmp_dir = tempfile.mkdtemp() with mock.patch(\"dallinger.command_line.config.APPDIRS\") as mock_appdirs: mock_appdirs.user_data_dir = tmp_dir yield", "== {\"test_host_1\": host1, \"test_host_2\": host2} @pytest.fixture(autouse=True) def tmp_appdir(): \"\"\"Monkey patch appdirs to provede", "= dict(user=\"test_user_1\", host=\"test_host_1\") host2 = dict(user=\"test_user_2\", host=\"test_host_2\") (Path(tmp_appdir) / \"hosts\" / \"test_host_1\").write_text(json.dumps(host1)) (Path(tmp_appdir)", "tmp_appdir(): \"\"\"Monkey patch appdirs to provede a pristine dirctory to each test\"\"\" tmp_dir", "{\"test_host_1\": host1, \"test_host_2\": host2} def test_store_host(): from dallinger.command_line.config import get_configured_hosts from dallinger.command_line.config import", "/ \"hosts\" / \"test_host_1\").write_text(json.dumps(host1)) (Path(tmp_appdir) / \"hosts\" / \"test_host_2\").write_text(json.dumps(host2)) assert get_configured_hosts() == {\"test_host_1\":", "\"test_host_2\": host2} def test_store_host(): from dallinger.command_line.config import get_configured_hosts from dallinger.command_line.config import store_host host1", "store_host(host2) assert get_configured_hosts() == {\"test_host_1\": host1, \"test_host_2\": host2} @pytest.fixture(autouse=True) def tmp_appdir(): \"\"\"Monkey patch", "import store_host host1 = dict(user=\"test_user_1\", host=\"test_host_1\") host2 = dict(user=\"test_user_2\", host=\"test_host_2\") store_host(host1), store_host(host2) assert", "host2 = dict(user=\"test_user_2\", host=\"test_host_2\") (Path(tmp_appdir) / \"hosts\" / \"test_host_1\").write_text(json.dumps(host1)) (Path(tmp_appdir) / \"hosts\" /", "host=\"test_host_2\") (Path(tmp_appdir) / \"hosts\" / \"test_host_1\").write_text(json.dumps(host1)) (Path(tmp_appdir) / \"hosts\" / \"test_host_2\").write_text(json.dumps(host2)) assert get_configured_hosts()", "get_configured_hosts() == {\"test_host_1\": host1, \"test_host_2\": host2} @pytest.fixture(autouse=True) def tmp_appdir(): \"\"\"Monkey patch appdirs to", "store_host(host1), store_host(host2) assert get_configured_hosts() == {\"test_host_1\": host1, \"test_host_2\": host2} @pytest.fixture(autouse=True) def tmp_appdir(): \"\"\"Monkey", "host1, \"test_host_2\": host2} def test_store_host(): from dallinger.command_line.config import get_configured_hosts from dallinger.command_line.config import store_host", "host1 = dict(user=\"test_user_1\", host=\"test_host_1\") host2 = dict(user=\"test_user_2\", host=\"test_host_2\") (Path(tmp_appdir) / \"hosts\" / \"test_host_1\").write_text(json.dumps(host1))", "from pathlib import Path import json import mock import pytest import shutil import", "== {} def test_list_hosts_results(tmp_appdir): from dallinger.command_line.config import get_configured_hosts (Path(tmp_appdir) / \"hosts\").mkdir() host1 =", "import tempfile def test_list_hosts_empty(): from dallinger.command_line.config import get_configured_hosts assert get_configured_hosts() == {} def", "get_configured_hosts from dallinger.command_line.config import store_host host1 = dict(user=\"test_user_1\", host=\"test_host_1\") host2 = dict(user=\"test_user_2\", host=\"test_host_2\")", "to provede a pristine dirctory to each test\"\"\" tmp_dir = tempfile.mkdtemp() with mock.patch(\"dallinger.command_line.config.APPDIRS\")", "test\"\"\" tmp_dir = tempfile.mkdtemp() 
with mock.patch(\"dallinger.command_line.config.APPDIRS\") as mock_appdirs: mock_appdirs.user_data_dir = tmp_dir yield tmp_dir", "= dict(user=\"test_user_2\", host=\"test_host_2\") (Path(tmp_appdir) / \"hosts\" / \"test_host_1\").write_text(json.dumps(host1)) (Path(tmp_appdir) / \"hosts\" / \"test_host_2\").write_text(json.dumps(host2))", "def test_list_hosts_results(tmp_appdir): from dallinger.command_line.config import get_configured_hosts (Path(tmp_appdir) / \"hosts\").mkdir() host1 = dict(user=\"test_user_1\", host=\"test_host_1\")", "\"\"\"Monkey patch appdirs to provede a pristine dirctory to each test\"\"\" tmp_dir =", "/ \"test_host_2\").write_text(json.dumps(host2)) assert get_configured_hosts() == {\"test_host_1\": host1, \"test_host_2\": host2} def test_store_host(): from dallinger.command_line.config", "host2 = dict(user=\"test_user_2\", host=\"test_host_2\") store_host(host1), store_host(host2) assert get_configured_hosts() == {\"test_host_1\": host1, \"test_host_2\": host2}", "appdirs to provede a pristine dirctory to each test\"\"\" tmp_dir = tempfile.mkdtemp() with", "import json import mock import pytest import shutil import tempfile def test_list_hosts_empty(): from", "assert get_configured_hosts() == {} def test_list_hosts_results(tmp_appdir): from dallinger.command_line.config import get_configured_hosts (Path(tmp_appdir) / \"hosts\").mkdir()", "import Path import json import mock import pytest import shutil import tempfile def", "host=\"test_host_1\") host2 = dict(user=\"test_user_2\", host=\"test_host_2\") (Path(tmp_appdir) / \"hosts\" / \"test_host_1\").write_text(json.dumps(host1)) (Path(tmp_appdir) / \"hosts\"", "json import mock import pytest import shutil import tempfile def test_list_hosts_empty(): from dallinger.command_line.config", "get_configured_hosts() == {\"test_host_1\": host1, \"test_host_2\": host2} def test_store_host(): from dallinger.command_line.config import get_configured_hosts from", "host2} @pytest.fixture(autouse=True) def tmp_appdir(): \"\"\"Monkey patch appdirs to provede a pristine dirctory to", "/ \"hosts\").mkdir() host1 = dict(user=\"test_user_1\", host=\"test_host_1\") host2 = dict(user=\"test_user_2\", host=\"test_host_2\") (Path(tmp_appdir) / \"hosts\"", "def test_list_hosts_empty(): from dallinger.command_line.config import get_configured_hosts assert get_configured_hosts() == {} def test_list_hosts_results(tmp_appdir): from", "{\"test_host_1\": host1, \"test_host_2\": host2} @pytest.fixture(autouse=True) def tmp_appdir(): \"\"\"Monkey patch appdirs to provede a", "dict(user=\"test_user_1\", host=\"test_host_1\") host2 = dict(user=\"test_user_2\", host=\"test_host_2\") store_host(host1), store_host(host2) assert get_configured_hosts() == {\"test_host_1\": host1,", "import get_configured_hosts from dallinger.command_line.config import store_host host1 = dict(user=\"test_user_1\", host=\"test_host_1\") host2 = dict(user=\"test_user_2\",", "\"test_host_2\": host2} @pytest.fixture(autouse=True) def tmp_appdir(): \"\"\"Monkey patch appdirs to provede a pristine dirctory", "host=\"test_host_2\") store_host(host1), store_host(host2) assert get_configured_hosts() == {\"test_host_1\": host1, \"test_host_2\": host2} @pytest.fixture(autouse=True) def tmp_appdir():", "Path import json import mock import pytest import shutil import tempfile def test_list_hosts_empty():", "/ \"test_host_1\").write_text(json.dumps(host1)) (Path(tmp_appdir) / \"hosts\" / \"test_host_2\").write_text(json.dumps(host2)) assert get_configured_hosts() == 
{\"test_host_1\": host1, \"test_host_2\":", "\"hosts\" / \"test_host_1\").write_text(json.dumps(host1)) (Path(tmp_appdir) / \"hosts\" / \"test_host_2\").write_text(json.dumps(host2)) assert get_configured_hosts() == {\"test_host_1\": host1,", "(Path(tmp_appdir) / \"hosts\" / \"test_host_1\").write_text(json.dumps(host1)) (Path(tmp_appdir) / \"hosts\" / \"test_host_2\").write_text(json.dumps(host2)) assert get_configured_hosts() ==", "dirctory to each test\"\"\" tmp_dir = tempfile.mkdtemp() with mock.patch(\"dallinger.command_line.config.APPDIRS\") as mock_appdirs: mock_appdirs.user_data_dir =", "provede a pristine dirctory to each test\"\"\" tmp_dir = tempfile.mkdtemp() with mock.patch(\"dallinger.command_line.config.APPDIRS\") as", "get_configured_hosts assert get_configured_hosts() == {} def test_list_hosts_results(tmp_appdir): from dallinger.command_line.config import get_configured_hosts (Path(tmp_appdir) /", "dict(user=\"test_user_2\", host=\"test_host_2\") store_host(host1), store_host(host2) assert get_configured_hosts() == {\"test_host_1\": host1, \"test_host_2\": host2} @pytest.fixture(autouse=True) def", "import mock import pytest import shutil import tempfile def test_list_hosts_empty(): from dallinger.command_line.config import", "get_configured_hosts (Path(tmp_appdir) / \"hosts\").mkdir() host1 = dict(user=\"test_user_1\", host=\"test_host_1\") host2 = dict(user=\"test_user_2\", host=\"test_host_2\") (Path(tmp_appdir)", "{} def test_list_hosts_results(tmp_appdir): from dallinger.command_line.config import get_configured_hosts (Path(tmp_appdir) / \"hosts\").mkdir() host1 = dict(user=\"test_user_1\",", "== {\"test_host_1\": host1, \"test_host_2\": host2} def test_store_host(): from dallinger.command_line.config import get_configured_hosts from dallinger.command_line.config", "mock import pytest import shutil import tempfile def test_list_hosts_empty(): from dallinger.command_line.config import get_configured_hosts", "= dict(user=\"test_user_1\", host=\"test_host_1\") host2 = dict(user=\"test_user_2\", host=\"test_host_2\") store_host(host1), store_host(host2) assert get_configured_hosts() == {\"test_host_1\":", "pristine dirctory to each test\"\"\" tmp_dir = tempfile.mkdtemp() with mock.patch(\"dallinger.command_line.config.APPDIRS\") as mock_appdirs: mock_appdirs.user_data_dir", "def tmp_appdir(): \"\"\"Monkey patch appdirs to provede a pristine dirctory to each test\"\"\"", "host2} def test_store_host(): from dallinger.command_line.config import get_configured_hosts from dallinger.command_line.config import store_host host1 =", "test_store_host(): from dallinger.command_line.config import get_configured_hosts from dallinger.command_line.config import store_host host1 = dict(user=\"test_user_1\", host=\"test_host_1\")", "(Path(tmp_appdir) / \"hosts\").mkdir() host1 = dict(user=\"test_user_1\", host=\"test_host_1\") host2 = dict(user=\"test_user_2\", host=\"test_host_2\") (Path(tmp_appdir) /", "tmp_dir = tempfile.mkdtemp() with mock.patch(\"dallinger.command_line.config.APPDIRS\") as mock_appdirs: mock_appdirs.user_data_dir = tmp_dir yield tmp_dir shutil.rmtree(tmp_dir)", "pathlib import Path import json import mock import pytest import shutil import tempfile", "<reponame>Dallinger/Dallinger from pathlib import Path import json import mock import pytest import shutil", "from dallinger.command_line.config import store_host host1 = dict(user=\"test_user_1\", host=\"test_host_1\") host2 = dict(user=\"test_user_2\", host=\"test_host_2\") store_host(host1),", 
"test_list_hosts_results(tmp_appdir): from dallinger.command_line.config import get_configured_hosts (Path(tmp_appdir) / \"hosts\").mkdir() host1 = dict(user=\"test_user_1\", host=\"test_host_1\") host2", "dict(user=\"test_user_2\", host=\"test_host_2\") (Path(tmp_appdir) / \"hosts\" / \"test_host_1\").write_text(json.dumps(host1)) (Path(tmp_appdir) / \"hosts\" / \"test_host_2\").write_text(json.dumps(host2)) assert", "dallinger.command_line.config import get_configured_hosts from dallinger.command_line.config import store_host host1 = dict(user=\"test_user_1\", host=\"test_host_1\") host2 =", "from dallinger.command_line.config import get_configured_hosts assert get_configured_hosts() == {} def test_list_hosts_results(tmp_appdir): from dallinger.command_line.config import", "@pytest.fixture(autouse=True) def tmp_appdir(): \"\"\"Monkey patch appdirs to provede a pristine dirctory to each", "dallinger.command_line.config import get_configured_hosts assert get_configured_hosts() == {} def test_list_hosts_results(tmp_appdir): from dallinger.command_line.config import get_configured_hosts", "(Path(tmp_appdir) / \"hosts\" / \"test_host_2\").write_text(json.dumps(host2)) assert get_configured_hosts() == {\"test_host_1\": host1, \"test_host_2\": host2} def", "pytest import shutil import tempfile def test_list_hosts_empty(): from dallinger.command_line.config import get_configured_hosts assert get_configured_hosts()", "from dallinger.command_line.config import get_configured_hosts (Path(tmp_appdir) / \"hosts\").mkdir() host1 = dict(user=\"test_user_1\", host=\"test_host_1\") host2 =", "from dallinger.command_line.config import get_configured_hosts from dallinger.command_line.config import store_host host1 = dict(user=\"test_user_1\", host=\"test_host_1\") host2", "test_list_hosts_empty(): from dallinger.command_line.config import get_configured_hosts assert get_configured_hosts() == {} def test_list_hosts_results(tmp_appdir): from dallinger.command_line.config", "a pristine dirctory to each test\"\"\" tmp_dir = tempfile.mkdtemp() with mock.patch(\"dallinger.command_line.config.APPDIRS\") as mock_appdirs:", "dallinger.command_line.config import get_configured_hosts (Path(tmp_appdir) / \"hosts\").mkdir() host1 = dict(user=\"test_user_1\", host=\"test_host_1\") host2 = dict(user=\"test_user_2\",", "store_host host1 = dict(user=\"test_user_1\", host=\"test_host_1\") host2 = dict(user=\"test_user_2\", host=\"test_host_2\") store_host(host1), store_host(host2) assert get_configured_hosts()", "to each test\"\"\" tmp_dir = tempfile.mkdtemp() with mock.patch(\"dallinger.command_line.config.APPDIRS\") as mock_appdirs: mock_appdirs.user_data_dir = tmp_dir", "assert get_configured_hosts() == {\"test_host_1\": host1, \"test_host_2\": host2} @pytest.fixture(autouse=True) def tmp_appdir(): \"\"\"Monkey patch appdirs", "get_configured_hosts() == {} def test_list_hosts_results(tmp_appdir): from dallinger.command_line.config import get_configured_hosts (Path(tmp_appdir) / \"hosts\").mkdir() host1", "\"test_host_2\").write_text(json.dumps(host2)) assert get_configured_hosts() == {\"test_host_1\": host1, \"test_host_2\": host2} def test_store_host(): from dallinger.command_line.config import", "import pytest import shutil import tempfile def test_list_hosts_empty(): from dallinger.command_line.config import get_configured_hosts assert", "dict(user=\"test_user_1\", host=\"test_host_1\") host2 = dict(user=\"test_user_2\", host=\"test_host_2\") (Path(tmp_appdir) / \"hosts\" / 
\"test_host_1\").write_text(json.dumps(host1)) (Path(tmp_appdir) /", "import get_configured_hosts (Path(tmp_appdir) / \"hosts\").mkdir() host1 = dict(user=\"test_user_1\", host=\"test_host_1\") host2 = dict(user=\"test_user_2\", host=\"test_host_2\")", "\"test_host_1\").write_text(json.dumps(host1)) (Path(tmp_appdir) / \"hosts\" / \"test_host_2\").write_text(json.dumps(host2)) assert get_configured_hosts() == {\"test_host_1\": host1, \"test_host_2\": host2}", "\"hosts\").mkdir() host1 = dict(user=\"test_user_1\", host=\"test_host_1\") host2 = dict(user=\"test_user_2\", host=\"test_host_2\") (Path(tmp_appdir) / \"hosts\" /", "host1, \"test_host_2\": host2} @pytest.fixture(autouse=True) def tmp_appdir(): \"\"\"Monkey patch appdirs to provede a pristine", "patch appdirs to provede a pristine dirctory to each test\"\"\" tmp_dir = tempfile.mkdtemp()", "assert get_configured_hosts() == {\"test_host_1\": host1, \"test_host_2\": host2} def test_store_host(): from dallinger.command_line.config import get_configured_hosts", "host=\"test_host_1\") host2 = dict(user=\"test_user_2\", host=\"test_host_2\") store_host(host1), store_host(host2) assert get_configured_hosts() == {\"test_host_1\": host1, \"test_host_2\":", "import get_configured_hosts assert get_configured_hosts() == {} def test_list_hosts_results(tmp_appdir): from dallinger.command_line.config import get_configured_hosts (Path(tmp_appdir)", "tempfile def test_list_hosts_empty(): from dallinger.command_line.config import get_configured_hosts assert get_configured_hosts() == {} def test_list_hosts_results(tmp_appdir):", "\"hosts\" / \"test_host_2\").write_text(json.dumps(host2)) assert get_configured_hosts() == {\"test_host_1\": host1, \"test_host_2\": host2} def test_store_host(): from", "shutil import tempfile def test_list_hosts_empty(): from dallinger.command_line.config import get_configured_hosts assert get_configured_hosts() == {}", "dallinger.command_line.config import store_host host1 = dict(user=\"test_user_1\", host=\"test_host_1\") host2 = dict(user=\"test_user_2\", host=\"test_host_2\") store_host(host1), store_host(host2)", "import shutil import tempfile def test_list_hosts_empty(): from dallinger.command_line.config import get_configured_hosts assert get_configured_hosts() ==", "= dict(user=\"test_user_2\", host=\"test_host_2\") store_host(host1), store_host(host2) assert get_configured_hosts() == {\"test_host_1\": host1, \"test_host_2\": host2} @pytest.fixture(autouse=True)", "/ \"hosts\" / \"test_host_2\").write_text(json.dumps(host2)) assert get_configured_hosts() == {\"test_host_1\": host1, \"test_host_2\": host2} def test_store_host():", "host1 = dict(user=\"test_user_1\", host=\"test_host_1\") host2 = dict(user=\"test_user_2\", host=\"test_host_2\") store_host(host1), store_host(host2) assert get_configured_hosts() ==" ]
[ "the first subtensorspec (int) entities of the embedding matrix (default None: all entities).", "r_subtensorspec) slro = sl(r, o, gl)[0] srlo = sr(l, o, gr)[0] errl +=", "function inputs. :input idxl: index value of the 'left' member. :input idxo: index", "= {} dres.update({'microlmean': np.mean(res[0])}) dres.update({'microlmedian': np.median(res[0])}) dres.update({'microlhits@n': np.mean(np.asarray(res[0]) <= n) * 100}) dres.update({'micrormean':", "io] rmv_idx_r += [true_triples[i, 2] for i in inter_r if true_triples[i, 2] !=", "[np.argsort(np.argsort((srlo).flatten())[::-1]).flatten()[r] + 1] return errl, errr def FilteredRankingScoreIdx_Schema(sl, sr, idxl, idxr, idxo, true_triples,", "1] return errl, errr def FilteredRankingScoreIdx_Schema(sl, sr, idxl, idxr, idxo, true_triples, relation2domainSet, relation2rangeSet,", "of relation indices. \"\"\" errr = [] for l, o, r in zip(idxl,", "n, round(dres['macrolhits@n'], 3))) logging.info('\\t-- right >> mean: %s, median: %s, hits@%s: %s%%' %", "(relationr.E[:, idxo]).reshape((1, relationr.D)) tmp = rightop(rhs, relr) simi = fnsim(leftop(lhs, rell), tmp.reshape((1, tmp.shape[1])))", "from true_triples from ranking results if true_triples is not None: il = np.argwhere(true_triples[:,", "inputs. :input idxr: index value of the 'right' member. :input idxo: index value", "np.mean(np.asarray(res[0]) <= n) * 100}) dres.update({'micrormean': np.mean(res[1])}) dres.update({'micrormedian': np.median(res[1])}) dres.update({'microrhits@n': np.mean(np.asarray(res[1]) <= n)", "np.mean(dictrelgrn.values())}) logging.info('### MACRO (%s):' % (tag)) logging.info('\\t-- left >> mean: %s, median: %s,", "the similarity score for the entities corresponding to the first subtensorspec (int) entities", "lhs: 1xD vector containing the embedding of idxl if subtensorspec is not None:", "Graph lhs = (embedding.E[:, idxl]).reshape((1, embedding.D)) # lhs: 1xD vector containing the embedding", "idxl, idxr, idxo, rel2domain, rel2range, illegal_dr_penalty=1e6, true_triples=None): errl, errr = [], [] relidxs", "vector containing the embedding of idxo (relationr) tmp = leftop(lhs, rell) # a", "[np.argsort(np.argsort((sr(l, o)[0]).flatten())[::-1]).flatten()[r] + 1] return errl, errr def RankingScoreRightIdx(sr, idxl, idxr, idxo): \"\"\"", "rel2range[relidx] dr_domain = set(dr_domain) dr_range = set(dr_range) test_triples = [(l, o, r) for", "in il if i in io] rmv_idx_r += [true_triples[i, 2] for i in", "100 dres.update({'dictrelres': dictrelres}) dres.update({'dictrellmean': dictrellmean}) dres.update({'dictrelrmean': dictrelrmean}) dres.update({'dictrelgmean': dictrelgmean}) dres.update({'dictrellmedian': dictrellmedian}) dres.update({'dictrelrmedian': dictrelrmedian})", "returns a Theano function to measure the similarity score of all 'right' entities", "if o == relidx] for l, o, r in test_triples: rmv_idx_l, rmv_idx_r =", "dictrelrrn}) dres.update({'dictrelgrn': dictrelgrn}) dres.update({'macrolmean': np.mean(dictrellmean.values())}) dres.update({'macrolmedian': np.mean(dictrellmedian.values())}) dres.update({'macrolhits@n': np.mean(dictrellrn.values())}) dres.update({'macrormean': np.mean(dictrelrmean.values())}) dres.update({'macrormedian': np.mean(dictrelrmedian.values())})", "parse_embeddings(embeddings) # Inputs idxl, idxo = T.iscalar('idxl'), T.iscalar('idxo') g = T.matrix('g') # Graph", "sl(r, o, gl)[0] srlo = sr(l, o, gr)[0] errl += [np.argsort(np.argsort((slro).flatten())[::-1]).flatten()[l] + 1]", "# Inputs idxr, idxo = T.iscalar('idxr'), T.iscalar('idxo') # 
Graph if subtensorspec is not", "relr)) return theano.function([idxl, idxr, idxo], [energy], on_unused_input='ignore') # # LEVERAGING RANGE AND DOMAIN", "T.iscalar('idxo') # Graph if subtensorspec is not None: # We compute the score", "# a = rell(lhs) # b = relr(rhs) simi = fnsim(tmp.reshape((1, tmp.shape[1])), rightop(rhs,", "scores_l = (sl(r, o)[0]).flatten() scores_l[rmv_idx_l] = -np.inf errl += [np.argsort(np.argsort(-scores_l)).flatten()[l] + 1] inter_r", "if subtensorspec is not None: # We compute the score only for a", "true_triples from ranking results if true_triples is not None: il = np.argwhere(true_triples[:, 0]", "r] scores_r = (sr(l, o)[0]).flatten() scores_r[rmv_idx_r] = -np.inf errr += [np.argsort(np.argsort(-scores_r)).flatten()[r] + 1]", "indices. \"\"\" errr = [] for l, o, r in zip(idxl, idxo, idxr):", "{} dictrellmedian = {} dictrelrmedian = {} dictrelgmedian = {} dictrellrn = {}", "idxo: list of relation indices. \"\"\" errr = [] for l, o, r", "[i for i in il if i in io] rmv_idx_r = [true_triples[i, 2]", "+ 1] return errl, errr def FilteredRankingScoreIdx(sl, sr, idxl, idxr, idxo, true_triples): \"\"\"", "r in [(idxl[i], idxo[i], idxr[i]) for i in selection]: errl += [np.argsort(np.argsort((sl(r, o)[0]).flatten())[::-1]).flatten()[l]", "triples r_valid_energies = energyfn(r_validlidx, r_validridx, r_validoidx)[0] r_valid_cutpoint = find_classification_threshold(r_valid_energies, r_valid_targets) valid_matches += classification_matches(r_valid_energies,", "results pen_idx_l = [cl for cl in range(len(scores_l)) if cl not in dr_domain]", "computes the rank list of the lhs and rhs, over a list of", "n, round(dres['macrorhits@n'], 3))) logging.info('\\t-- global >> mean: %s, median: %s, hits@%s: %s%%' %", "all entities). \"\"\" embedding, relationl, relationr = parse_embeddings(embeddings) # Inputs idxl, idxo =", "np.asarray(predictions), np.asarray(labels) precision, recall, threshold = metrics.roc_curve(labels, predictions) auc = metrics.auc(recall, precision) return", "== o).reshape(-1,) ir = np.argwhere(true_triples[:, 2] == r).reshape(-1,) inter_l = [i for i", "= [true_triples[i,2] for i in inter_r if true_triples[i,2] != r] scores_r = (sr(l,", "n, round(dres['microghits@n'], 3))) if idxo is not None: listrel = set(idxo) dictrelres =", "values. \"\"\" return theano.function([idxl, idxo], [simi], on_unused_input='ignore') def RankLeftFnIdx(fnsim, embeddings, leftop, rightop, subtensorspec=None):", "T.ivector('idxl'), T.ivector('idxo'), T.ivector('idxr') lhs, rhs = embedding.E[:, idxl].T, embedding.E[:, idxr].T rell, relr =", "np.mean(dictrelres[i][0] + dictrelres[i][1]) dictrellmedian[i] = np.median(dictrelres[i][0]) dictrelrmedian[i] = np.median(dictrelres[i][1]) dictrelgmedian[i] = np.median(dictrelres[i][0] +", "comparisons] return ret def classify(energies, threshold): classifications = np.asarray([1 if energy < threshold", "(sr(l, o)[0]).flatten() # Remove triples not in domain and range from ranking results", "= metrics.auc(recall, precision) return auc # # COMPUTING PERFORMANCE METRICS ON RANKINGS #", "hits@%s: %s%%' % ( round(dres['micrormean'], 5), round(dres['micrormedian'], 5), n, round(dres['microrhits@n'], 3))) logging.info('\\t-- global", "rhs: NxD embedding matrix rell = (relationl.E[:, idxo]).reshape((1, relationl.D)) # rell: 1xD vector", "= fnsim(a, b) \"\"\" Theano function inputs. 
:input idxl: index value of the", "zip(idxl, idxo, idxr): errl += [np.argsort(np.argsort((sl(r, o)[0]).flatten())[::-1]).flatten()[l] + 1] errr += [np.argsort(np.argsort((sr(l, o)[0]).flatten())[::-1]).flatten()[r]", "simi = fnsim(leftop(lhs, rell), tmp.reshape((1, tmp.shape[1]))) \"\"\" Theano function inputs. :input idxr: index", "!= l] inter_r = [i for i in il if i in io]", "rmv_idx_r = [], [] # Remove triples from true_triples from ranking results if", "(relationl.E[:, idxo]).reshape((1, relationl.D)) # rell: 1xD vector containing the embedding of idxo (relationl)", "def find_classification_threshold(energies, targets): x = np.unique(np.sort(energies)) cutpoints = np.concatenate(([x[0]], (x[1:] + x[:-1]) /", "predictions, labels = np.asarray(predictions), np.asarray(labels) precision, recall, threshold = metrics.precision_recall_curve(labels, predictions) auc =", "i in listrel: dictrellmean[i] = np.mean(dictrelres[i][0]) dictrelrmean[i] = np.mean(dictrelres[i][1]) dictrelgmean[i] = np.mean(dictrelres[i][0] +", "leftop(lhs, rell) # a = rell(lhs) # b = relr(rhs) # Negative Energy", "= -np.inf scores_l[pen_idx_l] -= illegal_dr_penalty scores_r[pen_idx_r] -= illegal_dr_penalty errl += [np.argsort(np.argsort(- scores_l)).flatten()[l] +", "idxr, idxo, true_triples): \"\"\" This function computes the rank list of the lhs", "in enumerate(res[0]): dictrelres[idxo[i]][0] += [j] for i, j in enumerate(res[1]): dictrelres[idxo[i]][1] += [j]", "\"\"\" Theano function inputs. :input idxr: index value of the 'right' member. :input", "np.mean(np.asarray(res[1]) <= n) * 100}) resg = res[0] + res[1] dres.update({'microgmean': np.mean(resg)}) dres.update({'microgmedian':", "idxr].T rell, relr = relationl.E[:, idxo].T, relationr.E[:, idxo].T energy = - fnsim(leftop(lhs, rell),", "the 'selection' parameter. 
\"\"\" errl, errr = [], [] for l, o, r", "rmv_idx_l = [true_triples[i, 0] for i in inter_l if true_triples[i, 0] != l]", "threshold = metrics.roc_curve(labels, predictions) auc = metrics.auc(recall, precision) return auc # # COMPUTING", "'relidx' predicate, and the corresponding target values test_idxs = np.where(testoidx == relidx) r_testlidx,", "i in io] rmv_idx_r += [true_triples[i, 2] for i in inter_r if true_triples[i,", ">> mean: %s, median: %s, hits@%s: %s%%' % ( round(dres['micrormean'], 5), round(dres['micrormedian'], 5),", "n) * 100}) logging.info('### MICRO (%s):' % (tag)) logging.info('\\t-- left >> mean: %s,", "cl in range(len(scores_l)) if cl not in dr_domain] pen_idx_r = [cr for cr", "round(dres['micrormean'], 5), round(dres['micrormedian'], 5), n, round(dres['microrhits@n'], 3))) logging.info('\\t-- global >> mean: %s, median:", "# rhs: NxD embedding matrix rell = (relationl.E[:, idxo]).reshape((1, relationl.D)) # rell: 1xD", "for i in inter_r if true_triples[i, 2] != r] scores_r = (srlo).flatten() scores_r[rmv_idx_r]", "the corresponding target values valid_idxs = np.where(validoidx == relidx) r_validlidx, r_validridx, r_validoidx =", "dres.update({'macrormedian': np.mean(dictrelrmedian.values())}) dres.update({'macrorhits@n': np.mean(dictrelrrn.values())}) dres.update({'macrogmean': np.mean(dictrelgmean.values())}) dres.update({'macrogmedian': np.mean(dictrelgmedian.values())}) dres.update({'macroghits@n': np.mean(dictrelgrn.values())}) logging.info('### MACRO (%s):'", "inter_r if true_triples[i, 2] != r] scores_l = (sl(r, o)[0]).flatten() scores_r = (sr(l,", "* l_subtensorspec) gr = schemaPenalty.schema_penalties_lr_fast([l] * r_subtensorspec, range(r_subtensorspec), [o] * r_subtensorspec) slro =", "errr def RankingScoreRightIdx(sr, idxl, idxr, idxo): \"\"\" This function computes the rank list", "idxo, rel2domain, rel2range, illegal_dr_penalty=1e6, true_triples=None): errl, errr = [], [] relidxs = np.unique(idxo)", "l, o, r in zip(idxl, idxo, idxr): il=np.argwhere(true_triples[:,0]==l).reshape(-1,) io=np.argwhere(true_triples[:,1]==o).reshape(-1,) ir=np.argwhere(true_triples[:,2]==r).reshape(-1,) inter_l = [i", "idxr, idxo): \"\"\" This function computes the rank list of the rhs, over", "sr, idxl, idxr, idxo, true_triples): \"\"\" This function computes the rank list of", "+ dictrelres[i][1]) dictrellmedian[i] = np.median(dictrelres[i][0]) dictrelrmedian[i] = np.median(dictrelres[i][1]) dictrelgmedian[i] = np.median(dictrelres[i][0] + dictrelres[i][1])", "+= [np.argsort(np.argsort((sr(l, o)[0]).flatten())[::-1]).flatten()[r] + 1] return errl, errr def RankingScoreRightIdx(sr, idxl, idxr, idxo):", "a = rell(lhs) # b = relr(rhs) # Negative Energy simi = fnsim(tmp.reshape((1,", "( round(dres['macrolmean'], 5), round(dres['macrolmedian'], 5), n, round(dres['macrolhits@n'], 3))) logging.info('\\t-- right >> mean: %s,", "hits@%s: %s%%' % ( round(dres['microlmean'], 5), round(dres['microlmedian'], 5), n, round(dres['microlhits@n'], 3))) logging.info('\\t-- right", "idxr: list of 'right' indices. :param idxo: list of relation indices. \"\"\" errr", "= {} dictrelrmedian = {} dictrelgmedian = {} dictrellrn = {} dictrelrrn =", "member. :input idxo: index value of the relation member. Theano function output. 
:output", "errr = [] for l, o, r in zip(idxl, idxo, idxr): gl =", "prior, leftop, rightop, subtensorspec=None): embedding, relationl, relationr = parse_embeddings(embeddings) # Inputs idxr, idxo", "dres.update({'dictrelgmedian': dictrelgmedian}) dres.update({'dictrellrn': dictrellrn}) dres.update({'dictrelrrn': dictrelrrn}) dres.update({'dictrelgrn': dictrelgrn}) dres.update({'macrolmean': np.mean(dictrellmean.values())}) dres.update({'macrolmedian': np.mean(dictrellmedian.values())}) dres.update({'macrolhits@n':", "!= r] scores_r = (sr(l, o)[0]).flatten() scores_r[rmv_idx_r] = -np.inf errr += [np.argsort(np.argsort(-scores_r)).flatten()[r] +", "% ((np.mean(valid_matches) * 100.0), (np.mean(test_matches) * 100.0))) def find_classification_threshold(energies, targets): x = np.unique(np.sort(energies))", "in listrel: dictrellmean[i] = np.mean(dictrelres[i][0]) dictrelrmean[i] = np.mean(dictrelres[i][1]) dictrelgmean[i] = np.mean(dictrelres[i][0] + dictrelres[i][1])", "= (embedding.E[:, idxl]).reshape((1, embedding.D)) rhs = (embedding.E[:, idxr]).reshape((1, embedding.D)) rell = (relationl.E[:, idxo]).reshape((1,", "np.mean(dictrellmedian.values())}) dres.update({'macrolhits@n': np.mean(dictrellrn.values())}) dres.update({'macrormean': np.mean(dictrelrmean.values())}) dres.update({'macrormedian': np.mean(dictrelrmedian.values())}) dres.update({'macrorhits@n': np.mean(dictrelrrn.values())}) dres.update({'macrogmean': np.mean(dictrelgmean.values())}) dres.update({'macrogmedian': np.mean(dictrelgmedian.values())})", "T import logging from sklearn import metrics from sparse.learning import parse_embeddings def auc_pr(predictions=[],", "for cutpoint in cutpoints] best_cutpoint = cutpoints[np.argmax(np.asarray(accuracies))] return best_cutpoint def classification_matches(energies, targets, threshold):", "rell) # a = rell(lhs) # b = relr(rhs) simi = fnsim(tmp.reshape((1, tmp.shape[1])),", "entities given couples of relation and 'right' entities (as index values). :param fnsim:", "relation member. Theano function output. :output simi: vector of score values. \"\"\" return", "of score values. 
\"\"\" return theano.function([idxr, idxo], [simi], on_unused_input='ignore') def RankingScoreIdx(sl, sr, idxl,", "relidx in relidxs: dr_domain, dr_range = rel2domain[relidx], rel2range[relidx] dr_domain = set(dr_domain) dr_range =", "of entities rhs = (embedding.E[:, :subtensorspec]).T else: rhs = embedding.E.T # rhs: NxD", "+= [np.argsort(np.argsort(-scores_r)).flatten()[r] + 1] return errl, errr def RankingScoreIdx_sub(sl, sr, idxl, idxr, idxo,", "( round(dres['microgmean'], 5), round(dres['microgmedian'], 5), n, round(dres['microghits@n'], 3))) if idxo is not None:", "LEARNING # def FilteredRankingScoreIdx_DR(sl, sr, idxl, idxr, idxo, rel2domain, rel2range, illegal_dr_penalty=1e6, true_triples=None): errl,", "relationr = parse_embeddings(embeddings) idxl, idxo, idxr = T.ivector('idxl'), T.ivector('idxo'), T.ivector('idxr') lhs, rhs =", "\"\"\" This function computes the rank list of the rhs, over a list", "if energy < threshold else 0 for energy in energies]) return classifications #", "np.mean(res[0])}) dres.update({'microlmedian': np.median(res[0])}) dres.update({'microlhits@n': np.mean(np.asarray(res[0]) <= n) * 100}) dres.update({'micrormean': np.mean(res[1])}) dres.update({'micrormedian': np.median(res[1])})", "= T.matrix('g') # Graph lhs = (embedding.E[:, idxl]).reshape((1, embedding.D)) # lhs: 1xD vector", "for i in inter_l if true_triples[i,0] != l] scores_l = (sl(r, o)[0]).flatten() scores_l[rmv_idx_l]", "set(dr_domain) dr_range = set(dr_range) test_triples = [(l, o, r) for (l, o, r)", "Under the Receiver Operating Characteristic Curve (AUC-ROC)''' predictions, labels = np.asarray(predictions), np.asarray(labels) precision,", "[o] * r_subtensorspec) slro = sl(r, o, gl)[0] srlo = sr(l, o, gr)[0]", "leftop(lhs, rell) # a = rell(lhs) # b = relr(rhs) simi = fnsim(tmp.reshape((1,", "rel2domain[relidx], rel2range[relidx] dr_domain = set(dr_domain) dr_range = set(dr_range) test_triples = [(l, o, r)", "classifications = classify(energies, threshold) comparisons = (targets == classifications) ret = [1. if", "rightop(rhs, relr)) # simi = fnsim(a, b) \"\"\" Theano function inputs. :input idxl:", "list of relation indices. \"\"\" errl = [] errr = [] for l,", "scores_l = (slro).flatten() scores_l[rmv_idx_l] = -np.inf errl += [np.argsort(np.argsort(-scores_l)).flatten()[l] + 1] inter_r =", "= T.iscalar('idxr'), T.iscalar('idxo') g = T.matrix('g') # Graph if subtensorspec is not None:", "= np.argwhere(true_triples[:, 2] == r).reshape(-1,) inter_l = [i for i in ir if", "rightop, subtensorspec=None): embedding, relationl, relationr = parse_embeddings(embeddings) # Inputs idxl, idxo = T.iscalar('idxl'),", "the embedding of idxo (relationl) relr = (relationr.E[:, idxo]).reshape((1, relationr.D)) # relr: 1xD", "* 100.0), (np.mean(test_matches) * 100.0))) def find_classification_threshold(energies, targets): x = np.unique(np.sort(energies)) cutpoints =", "theano.function([idxl, idxo], [simi], on_unused_input='ignore') def RankLeftFnIdx(fnsim, embeddings, leftop, rightop, subtensorspec=None): \"\"\" This function", "relr = (relationr.E[:, idxo]).reshape((1, relationr.D)) tmp = rightop(rhs, relr) simi = fnsim(leftop(lhs, rell),", "examples, defined in the 'selection' parameter. 
\"\"\" errl, errr = [], [] for", "dres.update({'dictrellrn': dictrellrn}) dres.update({'dictrelrrn': dictrelrrn}) dres.update({'dictrelgrn': dictrelgrn}) dres.update({'macrolmean': np.mean(dictrellmean.values())}) dres.update({'macrolmedian': np.mean(dictrellmedian.values())}) dres.update({'macrolhits@n': np.mean(dictrellrn.values())}) dres.update({'macrormean':", "inter_l = [i for i in ir if i in io] rmv_idx_l =", "r_subtensorspec, range(r_subtensorspec), [o] * r_subtensorspec) slro = sl(r, o, gl)[0] srlo = sr(l,", "def EnergyVecFn(fnsim, embeddings, leftop, rightop): embedding, relationl, relationr = parse_embeddings(embeddings) idxl, idxo, idxr", "dres.update({'macrogmedian': np.mean(dictrelgmedian.values())}) dres.update({'macroghits@n': np.mean(dictrelgrn.values())}) logging.info('### MACRO (%s):' % (tag)) logging.info('\\t-- left >> mean:", "embedding of idxo (relationr) tmp = leftop(lhs, rell) # a = rell(lhs) #", "metrics from sparse.learning import parse_embeddings def auc_pr(predictions=[], labels=[]): '''Computes the Area Under the", "output. :output simi: vector of score values. \"\"\" return theano.function([idxl, idxo], [simi], on_unused_input='ignore')", "# b = relr(rhs) simi = fnsim(tmp.reshape((1, tmp.shape[1])), rightop(rhs, relr)) # simi =", "ranking_summary(res, idxo=None, n=10, tag='raw'): resg = res[0] + res[1] dres = {} dres.update({'microlmean':", "Precision-Recall Curve (AUC-PR)''' predictions, labels = np.asarray(predictions), np.asarray(labels) precision, recall, threshold = metrics.precision_recall_curve(labels,", "rell: 1xD vector containing the embedding of idxo (relationl) relr = (relationr.E[:, idxo]).reshape((1,", "prior, leftop, rightop, subtensorspec=None): embedding, relationl, relationr = parse_embeddings(embeddings) # Inputs idxl, idxo", "list of 'left' indices. :param idxr: list of 'right' indices. :param idxo: list", "FilteredRankingScoreIdx(sl, sr, idxl, idxr, idxo, true_triples): \"\"\" This function computes the rank list", "l] inter_r = [i for i in il if i in io] rmv_idx_r", "idxo]).reshape((1, relationr.D)) tmp = rightop(rhs, relr) simi = fnsim(leftop(lhs, rell), tmp.reshape((1, tmp.shape[1]))) pen_simi", "lhs = (embedding.E[:, idxl]).reshape((1, embedding.D)) # lhs: 1xD vector containing the embedding of", "test_matches += classification_matches(r_test_energies, r_test_targets, r_valid_cutpoint) logging.info('Validation Accuracy: %s -- Test Accuracy: %s' %", "index values). :param fnsim: similarity function (on Theano variables). 
:param embeddings: an Embeddings", "= [], [] for l, o, r in [(idxl[i], idxo[i], idxr[i]) for i", "scores_r[rmv_idx_r] = -np.inf scores_l[pen_idx_l] -= illegal_dr_penalty scores_r[pen_idx_r] -= illegal_dr_penalty errl += [np.argsort(np.argsort(- scores_l)).flatten()[l]", "def RankRightFnIdx(fnsim, embeddings, leftop, rightop, subtensorspec=None): \"\"\" This function returns a Theano function", "is not None: listrel = set(idxo) dictrelres = {} dictrellmean = {} dictrelrmean", "+ 1] return errl, errr def RankingScoreRightIdx(sr, idxl, idxr, idxo): \"\"\" This function", "o).reshape(-1,) ir = np.argwhere(true_triples[:, 2] == r).reshape(-1,) inter_l = [i for i in", "zip(idxl, idxo, idxr): il=np.argwhere(true_triples[:,0]==l).reshape(-1,) io=np.argwhere(true_triples[:,1]==o).reshape(-1,) ir=np.argwhere(true_triples[:,2]==r).reshape(-1,) gl = schemaPenalty.schema_penalties_lr_fast(range(l_subtensorspec), [r] * l_subtensorspec, [o]", "dres.update({'macrolhits@n': np.mean(dictrellrn.values())}) dres.update({'macrormean': np.mean(dictrelrmean.values())}) dres.update({'macrormedian': np.mean(dictrelrmedian.values())}) dres.update({'macrorhits@n': np.mean(dictrelrrn.values())}) dres.update({'macrogmean': np.mean(dictrelgmean.values())}) dres.update({'macrogmedian': np.mean(dictrelgmedian.values())}) dres.update({'macroghits@n':", "b = relr(rhs) simi = fnsim(tmp.reshape((1, tmp.shape[1])), rightop(rhs, relr)) # simi = fnsim(a,", "unique relation indexes relidxs = np.unique(validoidx) valid_matches, test_matches = [], [] # Iterate", "zip(idxl, idxo, idxr) if o == relidx] for l, o, r in test_triples:", "%s, median: %s, hits@%s: %s%%' % ( round(dres['macrogmean'], 5), round(dres['macrogmedian'], 5), n, round(dres['macroghits@n'],", "if true_triples[i,0] != l] scores_l = (sl(r, o)[0]).flatten() scores_l[rmv_idx_l] = -np.inf errl +=", "rightop(rhs, relr)) return theano.function([idxl, idxr, idxo], [energy], on_unused_input='ignore') def EnergyVecFn(fnsim, embeddings, leftop, rightop):", "idxl, idxo, idxr = T.iscalar('idxl'), T.iscalar('idxo'), T.iscalar('idxr') lhs = (embedding.E[:, idxl]).reshape((1, embedding.D)) rhs", "labels=[]): '''Computes the Area Under the Receiver Operating Characteristic Curve (AUC-ROC)''' predictions, labels", "return theano.function([idxr, idxo], [simi], on_unused_input='ignore') def RankingScoreIdx(sl, sr, idxl, idxr, idxo): \"\"\" This", "relationl, relationr = parse_embeddings(embeddings) # Inputs idxr, idxo = T.iscalar('idxr'), T.iscalar('idxo') # Graph", "Theano function created with RankLeftFnIdx(). :param sr: Theano function created with RankRightFnIdx(). :param", "dictrelrrn = {} dictrelgrn = {} for i in listrel: dictrelres.update({i: [[], []]})", "Select the validation triples containing the 'relidx' predicate, and the corresponding target values", "= sl(r, o, gl)[0] srlo = sr(l, o, gr)[0] inter_l = [i for", "valid_matches += classification_matches(r_valid_energies, r_valid_targets, r_valid_cutpoint) # Select the test triples containing the 'relidx'", ":param fnsim: similarity function (on Theano variables). :param embeddings: an Embeddings instance. :param", "of relation indices. 
\"\"\" errl = [] errr = [] for l, o,", "o, gr)[0] inter_l = [i for i in ir if i in io]", "%s%%' % ( round(dres['macrormean'], 5), round(dres['macrormedian'], 5), n, round(dres['macrorhits@n'], 3))) logging.info('\\t-- global >>", "Accuracy: %s -- Test Accuracy: %s' % ((np.mean(valid_matches) * 100.0), (np.mean(test_matches) * 100.0)))", "b) pen_simi = g[0, :].T * prior.P[idxo, 0].T + g[1, :].T * prior.P[idxo,", "((np.mean(valid_matches) * 100.0), (np.mean(test_matches) * 100.0))) def find_classification_threshold(energies, targets): x = np.unique(np.sort(energies)) cutpoints", "Evaluation summary (as in FB15k): # def ranking_summary(res, idxo=None, n=10, tag='raw'): resg =", "embeddings, leftop, rightop): embedding, relationl, relationr = parse_embeddings(embeddings) idxl, idxo, idxr = T.ivector('idxl'),", "if true_triples[i,2] != r] scores_r = (sr(l, o)[0]).flatten() scores_r[rmv_idx_r] = -np.inf errr +=", "(slro).flatten() scores_l[rmv_idx_l] = -np.inf errl += [np.argsort(np.argsort(-scores_l)).flatten()[l] + 1] inter_r = [i for", "* 100.0))) def find_classification_threshold(energies, targets): x = np.unique(np.sort(energies)) cutpoints = np.concatenate(([x[0]], (x[1:] +", "embeddings: an Embeddings instance. :param leftop: class for the 'left' operator. :param rightop:", "simi = fnsim(tmp.reshape((1, tmp.shape[1])), rightop(rhs, relr)) # simi = fnsim(a, b) \"\"\" Theano", "metrics.auc(recall, precision) return auc # # COMPUTING PERFORMANCE METRICS ON RANKINGS # #", "def auc_roc(predictions=[], labels=[]): '''Computes the Area Under the Receiver Operating Characteristic Curve (AUC-ROC)'''", "(x[1:] + x[:-1]) / 2., [x[-1]])) accuracies = [np.mean(classification_matches(energies, targets, cutpoint)) * 100.0", "target values valid_idxs = np.where(validoidx == relidx) r_validlidx, r_validridx, r_validoidx = validlidx[valid_idxs], validridx[valid_idxs],", "= embedding.E.T rhs = (embedding.E[:, idxr]).reshape((1, embedding.D)) rell = (relationl.E[:, idxo]).reshape((1, relationl.D)) relr", "return errl, errr def FilteredRankingScoreIdx_Schema(sl, sr, idxl, idxr, idxo, true_triples, relation2domainSet, relation2rangeSet, schemaPenalty,", "dr_range = rel2domain[relidx], rel2range[relidx] dr_domain = set(dr_domain) dr_range = set(dr_range) test_triples = [(l,", "x[:-1]) / 2., [x[-1]])) accuracies = [np.mean(classification_matches(energies, targets, cutpoint)) * 100.0 for cutpoint", "dictrelgmedian[i] = np.median(dictrelres[i][0] + dictrelres[i][1]) dictrellrn[i] = np.mean(np.asarray(dictrelres[i][0]) <= n) * 100 dictrelrrn[i]", "FUNCTIONS # def RankRightFnIdx(fnsim, embeddings, leftop, rightop, subtensorspec=None): \"\"\" This function returns a", "+ 1] errr += [np.argsort(np.argsort((sr(l, o)[0]).flatten())[::-1]).flatten()[r] + 1] return errl, errr def RankingScoreRightIdx(sr,", "np.where(testoidx == relidx) r_testlidx, r_testridx, r_testoidx = testlidx[test_idxs], testridx[test_idxs], testoidx[test_idxs] r_test_targets = test_targets[test_idxs]", "i in il if i in io] rmv_idx_r = [true_triples[i, 2] for i", "+= [j] for i, j in enumerate(res[1]): dictrelres[idxo[i]][1] += [j] for i in", "= np.where(validoidx == relidx) r_validlidx, r_validridx, r_validoidx = validlidx[valid_idxs], validridx[valid_idxs], validoidx[valid_idxs] r_valid_targets =", "= T.iscalar('idxl'), T.iscalar('idxo') # Graph lhs = (embedding.E[:, idxl]).reshape((1, embedding.D)) # lhs: 1xD", "class for the 'right' operator. 
import logging

import numpy as np

import theano
import theano.tensor as T

from sklearn import metrics

from sparse.learning import parse_embeddings


def auc_pr(predictions=[], labels=[]):
    '''Computes the Area Under the Precision-Recall Curve (AUC-PR).'''
    predictions, labels = np.asarray(predictions), np.asarray(labels)
    precision, recall, threshold = metrics.precision_recall_curve(labels, predictions)
    auc = metrics.auc(recall, precision)
    return auc


def auc_roc(predictions=[], labels=[]):
    '''Computes the Area Under the Receiver Operating Characteristic Curve (AUC-ROC).'''
    predictions, labels = np.asarray(predictions), np.asarray(labels)
    fpr, tpr, threshold = metrics.roc_curve(labels, predictions)
    auc = metrics.auc(fpr, tpr)
    return auc

#
# COMPUTING PERFORMANCE METRICS ON RANKINGS
#

# Evaluation summary (as in FB15k):
def ranking_summary(res, idxo=None, n=10, tag='raw'):
    dres = {}
    dres.update({'microlmean': np.mean(res[0])})
    dres.update({'microlmedian': np.median(res[0])})
    dres.update({'microlhits@n': np.mean(np.asarray(res[0]) <= n) * 100})
    dres.update({'micrormean': np.mean(res[1])})
    dres.update({'micrormedian': np.median(res[1])})
    dres.update({'microrhits@n': np.mean(np.asarray(res[1]) <= n) * 100})

    resg = res[0] + res[1]
    dres.update({'microgmean': np.mean(resg)})
    dres.update({'microgmedian': np.median(resg)})
    dres.update({'microghits@n': np.mean(np.asarray(resg) <= n) * 100})

    logging.info('### MICRO (%s):' % (tag))
    logging.info('\t-- left   >> mean: %s, median: %s, hits@%s: %s%%' % (
        round(dres['microlmean'], 5), round(dres['microlmedian'], 5), n, round(dres['microlhits@n'], 3)))
    logging.info('\t-- right  >> mean: %s, median: %s, hits@%s: %s%%' % (
        round(dres['micrormean'], 5), round(dres['micrormedian'], 5), n, round(dres['microrhits@n'], 3)))
    logging.info('\t-- global >> mean: %s, median: %s, hits@%s: %s%%' % (
        round(dres['microgmean'], 5), round(dres['microgmedian'], 5), n, round(dres['microghits@n'], 3)))

    if idxo is not None:
        listrel = set(idxo)
        dictrelres = {}
        dictrellmean, dictrelrmean, dictrelgmean = {}, {}, {}
        dictrellmedian, dictrelrmedian, dictrelgmedian = {}, {}, {}
        dictrellrn, dictrelrrn, dictrelgrn = {}, {}, {}

        for i in listrel:
            dictrelres.update({i: [[], []]})
        for i, j in enumerate(res[0]):
            dictrelres[idxo[i]][0] += [j]
        for i, j in enumerate(res[1]):
            dictrelres[idxo[i]][1] += [j]

        for i in listrel:
            dictrellmean[i] = np.mean(dictrelres[i][0])
            dictrelrmean[i] = np.mean(dictrelres[i][1])
            dictrelgmean[i] = np.mean(dictrelres[i][0] + dictrelres[i][1])
            dictrellmedian[i] = np.median(dictrelres[i][0])
            dictrelrmedian[i] = np.median(dictrelres[i][1])
            dictrelgmedian[i] = np.median(dictrelres[i][0] + dictrelres[i][1])
            dictrellrn[i] = np.mean(np.asarray(dictrelres[i][0]) <= n) * 100
            dictrelrrn[i] = np.mean(np.asarray(dictrelres[i][1]) <= n) * 100
            dictrelgrn[i] = np.mean(np.asarray(dictrelres[i][0] + dictrelres[i][1]) <= n) * 100

        dres.update({'dictrelres': dictrelres})
        dres.update({'dictrellmean': dictrellmean})
        dres.update({'dictrelrmean': dictrelrmean})
        dres.update({'dictrelgmean': dictrelgmean})
        dres.update({'dictrellmedian': dictrellmedian})
        dres.update({'dictrelrmedian': dictrelrmedian})
        dres.update({'dictrelgmedian': dictrelgmedian})
        dres.update({'dictrellrn': dictrellrn})
        dres.update({'dictrelrrn': dictrelrrn})
        dres.update({'dictrelgrn': dictrelgrn})

        dres.update({'macrolmean': np.mean(list(dictrellmean.values()))})
        dres.update({'macrolmedian': np.mean(list(dictrellmedian.values()))})
        dres.update({'macrolhits@n': np.mean(list(dictrellrn.values()))})
        dres.update({'macrormean': np.mean(list(dictrelrmean.values()))})
        dres.update({'macrormedian': np.mean(list(dictrelrmedian.values()))})
        dres.update({'macrorhits@n': np.mean(list(dictrelrrn.values()))})
        dres.update({'macrogmean': np.mean(list(dictrelgmean.values()))})
        dres.update({'macrogmedian': np.mean(list(dictrelgmedian.values()))})
        dres.update({'macroghits@n': np.mean(list(dictrelgrn.values()))})

        logging.info('### MACRO (%s):' % (tag))
        logging.info('\t-- left   >> mean: %s, median: %s, hits@%s: %s%%' % (
            round(dres['macrolmean'], 5), round(dres['macrolmedian'], 5), n, round(dres['macrolhits@n'], 3)))
        logging.info('\t-- right  >> mean: %s, median: %s, hits@%s: %s%%' % (
            round(dres['macrormean'], 5), round(dres['macrormedian'], 5), n, round(dres['macrorhits@n'], 3)))
        logging.info('\t-- global >> mean: %s, median: %s, hits@%s: %s%%' % (
            round(dres['macrogmean'], 5), round(dres['macrogmedian'], 5), n, round(dres['macroghits@n'], 3)))

    return dres
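# Minimal usage sketch (illustration only, not part of the original evaluation
# pipeline): ranking_summary() takes a pair of rank lists for the left- and
# right-entity predictions and aggregates mean rank, median rank and hits@n.
# The rank values below are made-up toy numbers.
def _example_ranking_summary():
    logging.basicConfig(level=logging.INFO)
    left_ranks = [1, 3, 120, 7, 2]      # hypothetical ranks of the true 'left' entity
    right_ranks = [5, 1, 30, 2, 400]    # hypothetical ranks of the true 'right' entity
    dres = ranking_summary((left_ranks, right_ranks), n=10, tag='toy')
    # e.g. dres['microghits@n'] is the percentage of ranks <= 10 over both lists
    return dres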
#
# RANKING FUNCTIONS
#

def RankRightFnIdx(fnsim, embeddings, leftop, rightop, subtensorspec=None):
    """
    This function returns a Theano function to measure the similarity score of
    all 'right' entities given couples of relation and 'left' entities (as
    index values).

    :param fnsim: similarity function (on Theano variables).
    :param embeddings: an Embeddings instance.
    :param leftop: class for the 'left' operator.
    :param rightop: class for the 'right' operator.
    :param subtensorspec: only measure the similarity score for the entities
                          corresponding to the first subtensorspec (int)
                          entities of the embedding matrix
                          (default None: all entities).
    """
    embedding, relationl, relationr = parse_embeddings(embeddings)

    # Inputs
    idxl, idxo = T.iscalar('idxl'), T.iscalar('idxo')
    # Graph
    lhs = (embedding.E[:, idxl]).reshape((1, embedding.D))  # lhs: 1xD vector containing the embedding of idxl
    if subtensorspec is not None:
        # We compute the score only for a subset of entities
        rhs = (embedding.E[:, :subtensorspec]).T
    else:
        rhs = embedding.E.T  # rhs: NxD embedding matrix
    rell = (relationl.E[:, idxo]).reshape((1, relationl.D))  # rell: 1xD vector containing the embedding of idxo (relationl)
    relr = (relationr.E[:, idxo]).reshape((1, relationr.D))  # relr: 1xD vector containing the embedding of idxo (relationr)

    tmp = leftop(lhs, rell)
    simi = fnsim(tmp.reshape((1, tmp.shape[1])), rightop(rhs, relr))
    """
    Theano function inputs.
    :input idxl: index value of the 'left' member.
    :input idxo: index value of the relation member.

    Theano function output.
    :output simi: vector of score values.
    """
    return theano.function([idxl, idxo], [simi], on_unused_input='ignore')


def RankLeftFnIdx(fnsim, embeddings, leftop, rightop, subtensorspec=None):
    """
    This function returns a Theano function to measure the similarity score of
    all 'left' entities given couples of relation and 'right' entities (as
    index values). Parameters are the same as for RankRightFnIdx().
    """
    embedding, relationl, relationr = parse_embeddings(embeddings)

    # Inputs
    idxr, idxo = T.iscalar('idxr'), T.iscalar('idxo')
    # Graph
    if subtensorspec is not None:
        # We compute the score only for a subset of entities
        lhs = (embedding.E[:, :subtensorspec]).T
    else:
        lhs = embedding.E.T  # lhs: NxD embedding matrix
    rhs = (embedding.E[:, idxr]).reshape((1, embedding.D))  # rhs: 1xD vector containing the embedding of idxr
    rell = (relationl.E[:, idxo]).reshape((1, relationl.D))
    relr = (relationr.E[:, idxo]).reshape((1, relationr.D))

    tmp = rightop(rhs, relr)
    simi = fnsim(leftop(lhs, rell), tmp.reshape((1, tmp.shape[1])))
    """
    Theano function inputs.
    :input idxr: index value of the 'right' member.
    :input idxo: index value of the relation member.

    Theano function output.
    :output simi: vector of score values.
    """
    return theano.function([idxr, idxo], [simi], on_unused_input='ignore')


def RankingScoreIdx(sl, sr, idxl, idxr, idxo):
    """
    This function computes the rank list of the lhs and rhs, over a list of
    lhs, rhs and rel indexes.

    :param sl: Theano function created with RankLeftFnIdx().
    :param sr: Theano function created with RankRightFnIdx().
    :param idxl: list of 'left' indices.
    :param idxr: list of 'right' indices.
    :param idxo: list of relation indices.
    """
    errl, errr = [], []
    for l, o, r in zip(idxl, idxo, idxr):
        errl += [np.argsort(np.argsort((sl(r, o)[0]).flatten())[::-1]).flatten()[l] + 1]
        errr += [np.argsort(np.argsort((sr(l, o)[0]).flatten())[::-1]).flatten()[r] + 1]
    return errl, errr


def RankingScoreIdx_sub(sl, sr, idxl, idxr, idxo, selection=[]):
    """
    Similar to RankingScoreIdx, but works on a subset of examples, defined in
    the 'selection' parameter.
    """
    errl, errr = [], []
    for l, o, r in [(idxl[i], idxo[i], idxr[i]) for i in selection]:
        errl += [np.argsort(np.argsort((sl(r, o)[0]).flatten())[::-1]).flatten()[l] + 1]
        errr += [np.argsort(np.argsort((sr(l, o)[0]).flatten())[::-1]).flatten()[r] + 1]
    return errl, errr


def RankingScoreRightIdx(sr, idxl, idxr, idxo):
    """
    This function computes the rank list of the rhs, over a list of lhs, rhs
    and rel indexes.

    :param sr: Theano function created with RankRightFnIdx().
    :param idxl: list of 'left' indices.
    :param idxr: list of 'right' indices.
    :param idxo: list of relation indices.
    """
    errr = []
    for l, o, r in zip(idxl, idxo, idxr):
        errr += [np.argsort(np.argsort((sr(l, o)[0]).flatten())[::-1]).flatten()[r] + 1]
    return errr


def FilteredRankingScoreIdx(sl, sr, idxl, idxr, idxo, true_triples):
    """
    This function computes the filtered rank list of the lhs and rhs, over a
    list of lhs, rhs and rel indexes: other known true triples are masked out
    of the candidate lists before ranking.

    :param sl: Theano function created with RankLeftFnIdx().
    :param sr: Theano function created with RankRightFnIdx().
    :param idxl: list of 'left' indices.
    :param idxr: list of 'right' indices.
    :param idxo: list of relation indices.
    :param true_triples: array of known true (left, relation, right) triples.
    """
    errl, errr = [], []
    for l, o, r in zip(idxl, idxo, idxr):
        il = np.argwhere(true_triples[:, 0] == l).reshape(-1,)
        io = np.argwhere(true_triples[:, 1] == o).reshape(-1,)
        ir = np.argwhere(true_triples[:, 2] == r).reshape(-1,)

        # Left ranking: mask every other true 'left' entity for (?, o, r)
        inter_l = [i for i in ir if i in io]
        rmv_idx_l = [true_triples[i, 0] for i in inter_l if true_triples[i, 0] != l]
        scores_l = (sl(r, o)[0]).flatten()
        scores_l[rmv_idx_l] = -np.inf
        errl += [np.argsort(np.argsort(-scores_l)).flatten()[l] + 1]

        # Right ranking: mask every other true 'right' entity for (l, o, ?)
        inter_r = [i for i in il if i in io]
        rmv_idx_r = [true_triples[i, 2] for i in inter_r if true_triples[i, 2] != r]
        scores_r = (sr(l, o)[0]).flatten()
        scores_r[rmv_idx_r] = -np.inf
        errr += [np.argsort(np.argsort(-scores_r)).flatten()[r] + 1]
    return errl, errr
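# The ranking functions above all rely on the same double-argsort idiom:
# np.argsort(-scores) lists entity indices from best to worst score, and a
# second np.argsort inverts that permutation, giving each entity its 0-based
# position in the ranking; adding 1 yields the usual 1-based rank. A small
# NumPy-only sketch (the helper name is ours, not part of the original API):
def _rank_of(scores, target_idx):
    """Return the 1-based rank of `target_idx` when scores are sorted in descending order."""
    return int(np.argsort(np.argsort(-np.asarray(scores)))[target_idx] + 1)

# Example: with scores = [0.1, 0.9, 0.3, 0.7], entity 1 has rank 1 and entity 0 has rank 4.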
#
# COMPUTING PERFORMANCE METRICS ON CLASSIFICATIONS
#

def classification_summary(energyfn, validlidx, validridx, validoidx, valid_targets,
                           testlidx, testridx, testoidx, test_targets):
    # Find unique relation indexes
    relidxs = np.unique(validoidx)
    valid_matches, test_matches = [], []
    # Iterate over unique relation indexes
    for relidx in relidxs:
        # Select the validation triples containing the 'relidx' predicate,
        # and the corresponding target values
        valid_idxs = np.where(validoidx == relidx)
        r_validlidx, r_validridx, r_validoidx = validlidx[valid_idxs], validridx[valid_idxs], validoidx[valid_idxs]
        r_valid_targets = valid_targets[valid_idxs]

        # Evaluate the energies of those triples, and pick the classification
        # threshold that maximises accuracy on the validation data
        r_valid_energies = energyfn(r_validlidx, r_validridx, r_validoidx)[0]
        r_valid_cutpoint = find_classification_threshold(r_valid_energies, r_valid_targets)
        valid_matches += classification_matches(r_valid_energies, r_valid_targets, r_valid_cutpoint)

        # Select the test triples containing the 'relidx' predicate,
        # and the corresponding target values
        test_idxs = np.where(testoidx == relidx)
        r_testlidx, r_testridx, r_testoidx = testlidx[test_idxs], testridx[test_idxs], testoidx[test_idxs]
        r_test_targets = test_targets[test_idxs]

        r_test_energies = energyfn(r_testlidx, r_testridx, r_testoidx)[0]
        test_matches += classification_matches(r_test_energies, r_test_targets, r_valid_cutpoint)

    logging.info('Validation Accuracy: %s -- Test Accuracy: %s'
                 % ((np.mean(valid_matches) * 100.0), (np.mean(test_matches) * 100.0)))


def find_classification_threshold(energies, targets):
    # Candidate cutpoints: the extreme energies and the midpoints between consecutive distinct energies
    x = np.unique(np.sort(energies))
    cutpoints = np.concatenate(([x[0]], (x[1:] + x[:-1]) / 2., [x[-1]]))
    accuracies = [np.mean(classification_matches(energies, targets, cutpoint)) * 100.0 for cutpoint in cutpoints]
    best_cutpoint = cutpoints[np.argmax(np.asarray(accuracies))]
    return best_cutpoint


def classification_matches(energies, targets, threshold):
    classifications = classify(energies, threshold)
    comparisons = (targets == classifications)
    ret = [1. if comparison == True else 0. for comparison in comparisons]
    return ret


def classify(energies, threshold):
    # A triple is classified as positive iff its energy lies below the threshold
    classifications = np.asarray([1 if energy < threshold else 0 for energy in energies])
    return classifications

#
# CLASSIFICATION FUNCTIONS
#

def EnergyFn(fnsim, embeddings, leftop, rightop):
    embedding, relationl, relationr = parse_embeddings(embeddings)
    idxl, idxo, idxr = T.iscalar('idxl'), T.iscalar('idxo'), T.iscalar('idxr')
    lhs = (embedding.E[:, idxl]).reshape((1, embedding.D))
    rhs = (embedding.E[:, idxr]).reshape((1, embedding.D))
    rell = (relationl.E[:, idxo]).reshape((1, relationl.D))
    relr = (relationr.E[:, idxo]).reshape((1, relationr.D))
    # The energy of a triple is the negated similarity of its two halves
    energy = - fnsim(leftop(lhs, rell), rightop(rhs, relr))
    return theano.function([idxl, idxr, idxo], [energy], on_unused_input='ignore')


def EnergyVecFn(fnsim, embeddings, leftop, rightop):
    embedding, relationl, relationr = parse_embeddings(embeddings)
    idxl, idxo, idxr = T.ivector('idxl'), T.ivector('idxo'), T.ivector('idxr')
    lhs, rhs = embedding.E[:, idxl].T, embedding.E[:, idxr].T
    rell, relr = relationl.E[:, idxo].T, relationr.E[:, idxo].T
    energy = - fnsim(leftop(lhs, rell), rightop(rhs, relr))
    return theano.function([idxl, idxr, idxo], [energy], on_unused_input='ignore')
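# Illustrative sketch of the threshold selection used by classification_summary():
# candidate cutpoints are placed at the extreme energies and between consecutive
# distinct energy values, and the one maximising accuracy on the (validation)
# targets is kept. The energies and targets below are toy values, not taken from
# any dataset.
def _example_threshold_selection():
    energies = np.asarray([0.05, 0.10, 0.20, 0.80, 0.90])
    targets = np.asarray([1, 1, 1, 0, 0])       # low energy means "true" (class 1)
    cutpoint = find_classification_threshold(energies, targets)
    predictions = classify(energies, cutpoint)  # -> array([1, 1, 1, 0, 0])
    accuracy = np.mean(classification_matches(energies, targets, cutpoint))
    return cutpoint, predictions, accuracy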
Theano function", "ir=np.argwhere(true_triples[:,2]==r).reshape(-1,) gl = schemaPenalty.schema_penalties_lr_fast(range(l_subtensorspec), [r] * l_subtensorspec, [o] * l_subtensorspec) gr = schemaPenalty.schema_penalties_lr_fast([l]", "-np.inf scores_r[rmv_idx_r] = -np.inf scores_l[pen_idx_l] -= illegal_dr_penalty scores_r[pen_idx_r] -= illegal_dr_penalty errl += [np.argsort(np.argsort(-", "i in il if i in io] rmv_idx_r += [true_triples[i, 2] for i", "il if i in io] rmv_idx_r += [true_triples[i, 2] for i in inter_r", "(sl(r, o)[0]).flatten() scores_r = (sr(l, o)[0]).flatten() # Remove triples not in domain and", "= fnsim(tmp.reshape((1, tmp.shape[1])), rightop(rhs, relr)) # simi = fnsim(a, b) \"\"\" Theano function", "def RankLeftFnIdx(fnsim, embeddings, leftop, rightop, subtensorspec=None): \"\"\" This function returns a Theano function", "(relationl.E[:, idxo]).reshape((1, relationl.D)) relr = (relationr.E[:, idxo]).reshape((1, relationr.D)) tmp = rightop(rhs, relr) simi", "= - fnsim(leftop(lhs, rell), rightop(rhs, relr)) return theano.function([idxl, idxr, idxo], [energy], on_unused_input='ignore') def", "np.unique(idxo) for relidx in relidxs: dr_domain, dr_range = rel2domain[relidx], rel2range[relidx] dr_domain = set(dr_domain)", "valid_targets, testlidx, testridx, testoidx, test_targets): # Find unique relation indexes relidxs = np.unique(validoidx)", "logging.info('\\t-- right >> mean: %s, median: %s, hits@%s: %s%%' % ( round(dres['macrormean'], 5),", "[np.argsort(np.argsort((sr(l, o)[0]).flatten())[::-1]).flatten()[r] + 1] return errr # # COMPUTING PERFORMANCE METRICS ON CLASSIFICATIONS", "of the embedding matrix (default None: all entities). \"\"\" embedding, relationl, relationr =", "fnsim(tmp.reshape((1, tmp.shape[1])), rightop(rhs, relr)) # simi = fnsim(a, b) pen_simi = g[0, :].T", "import parse_embeddings def auc_pr(predictions=[], labels=[]): '''Computes the Area Under the Precision-Recall Curve (AUC-PR)'''", "r_valid_targets) valid_matches += classification_matches(r_valid_energies, r_valid_targets, r_valid_cutpoint) # Select the test triples containing the", "Theano function to measure the similarity score of all 'left' entities given couples", "rmv_idx_r = [true_triples[i, 2] for i in inter_r if true_triples[i, 2] != r]", "in ir if i in io] rmv_idx_l += [true_triples[i, 0] for i in", "subtensorspec: only measure the similarity score for the entities corresponding to the first", "median: %s, hits@%s: %s%%' % ( round(dres['macrormean'], 5), round(dres['macrormedian'], 5), n, round(dres['macrorhits@n'], 3)))", "on a subset of examples, defined in the 'selection' parameter. \"\"\" errl, errr", "[] for l, o, r in zip(idxl, idxo, idxr): il=np.argwhere(true_triples[:,0]==l).reshape(-1,) io=np.argwhere(true_triples[:,1]==o).reshape(-1,) ir=np.argwhere(true_triples[:,2]==r).reshape(-1,) gl", "i in listrel: dictrelres.update({i: [[], []]}) for i, j in enumerate(res[0]): dictrelres[idxo[i]][0] +=", "simi = simi - pen_simi return theano.function([idxl, idxo, g], [simi], on_unused_input='ignore') def RankLeftFnIdx_Schema(fnsim,", "rell), rightop(rhs, relr)) return theano.function([idxl, idxr, idxo], [energy], on_unused_input='ignore') def EnergyVecFn(fnsim, embeddings, leftop,", "= leftop(lhs, rell) # a = rell(lhs) # b = relr(rhs) simi =", "relr)) # simi = fnsim(a, b) \"\"\" Theano function inputs. :input idxl: index", "and rel indexes. :param sl: Theano function created with RankLeftFnIdx(). 
:param sr: Theano", "predictions) auc = metrics.auc(recall, precision) return auc def auc_roc(predictions=[], labels=[]): '''Computes the Area", "# # RANKING FUNCTIONS # def RankRightFnIdx(fnsim, embeddings, leftop, rightop, subtensorspec=None): \"\"\" This", "leftop, rightop): embedding, relationl, relationr = parse_embeddings(embeddings) idxl, idxo, idxr = T.ivector('idxl'), T.ivector('idxo'),", "zip(idxl, idxo, idxr): errr += [np.argsort(np.argsort((sr(l, o)[0]).flatten())[::-1]).flatten()[r] + 1] return errr # #", "r in zip(idxl, idxo, idxr): errr += [np.argsort(np.argsort((sr(l, o)[0]).flatten())[::-1]).flatten()[r] + 1] return errr", "* 100 dictrelgrn[i] = np.mean(np.asarray(dictrelres[i][0] + dictrelres[i][1]) <= n) * 100 dres.update({'dictrelres': dictrelres})", "Curve (AUC-ROC)''' predictions, labels = np.asarray(predictions), np.asarray(labels) precision, recall, threshold = metrics.roc_curve(labels, predictions)", "\"\"\" errr = [] for l, o, r in zip(idxl, idxo, idxr): errr", "np.argwhere(true_triples[:, 0] == l).reshape(-1,) io = np.argwhere(true_triples[:, 1] == o).reshape(-1,) ir = np.argwhere(true_triples[:,", "np.where(validoidx == relidx) r_validlidx, r_validridx, r_validoidx = validlidx[valid_idxs], validridx[valid_idxs], validoidx[valid_idxs] r_valid_targets = valid_targets[valid_idxs]", "idxl, idxo, idxr = T.ivector('idxl'), T.ivector('idxo'), T.ivector('idxr') lhs, rhs = embedding.E[:, idxl].T, embedding.E[:,", "pen_simi return theano.function([idxr, idxo, g], [simi], on_unused_input='ignore') #@profile def RankingScoreIdx_Schema(sl, sr, idxl, idxr,", "r] scores_r = (srlo).flatten() scores_r[rmv_idx_r] = -np.inf errr += [np.argsort(np.argsort(-scores_r)).flatten()[r] + 1] return", "errr def FilteredRankingScoreIdx_Schema(sl, sr, idxl, idxr, idxo, true_triples, relation2domainSet, relation2rangeSet, schemaPenalty, l_subtensorspec=None, r_subtensorspec=None):", "dres.update({'macroghits@n': np.mean(dictrelgrn.values())}) logging.info('### MACRO (%s):' % (tag)) logging.info('\\t-- left >> mean: %s, median:", "of the 'left' member. :input idxo: index value of the relation member. Theano", "np.mean(res[1])}) dres.update({'micrormedian': np.median(res[1])}) dres.update({'microrhits@n': np.mean(np.asarray(res[1]) <= n) * 100}) resg = res[0] +", "indices. :param idxr: list of 'right' indices. :param idxo: list of relation indices.", "idxo, g], [simi], on_unused_input='ignore') def RankLeftFnIdx_Schema(fnsim, embeddings, prior, leftop, rightop, subtensorspec=None): embedding, relationl,", "in zip(idxl, idxo, idxr) if o == relidx] for l, o, r in", ":].T * prior.P[idxo, 1].T simi = simi - pen_simi return theano.function([idxl, idxo, g],", "# def FilteredRankingScoreIdx_DR(sl, sr, idxl, idxr, idxo, rel2domain, rel2range, illegal_dr_penalty=1e6, true_triples=None): errl, errr", "= {} dictrelrrn = {} dictrelgrn = {} for i in listrel: dictrelres.update({i:", "[true_triples[i, 2] for i in inter_r if true_triples[i, 2] != r] scores_l =", "score only for a subset of entities rhs = (embedding.E[:, :subtensorspec]).T else: rhs", "def auc_pr(predictions=[], labels=[]): '''Computes the Area Under the Precision-Recall Curve (AUC-PR)''' predictions, labels", "for comparison in comparisons] return ret def classify(energies, threshold): classifications = np.asarray([1 if", "function inputs. :input idxr: index value of the 'right' member. :input idxo: index", "matrix (default None: all entities). 
\"\"\" embedding, relationl, relationr = parse_embeddings(embeddings) # Inputs", "[] # Iterate over unique relation indexes for relidx in relidxs: # Select", "= - fnsim(leftop(lhs, rell), rightop(rhs, relr)) return theano.function([idxl, idxr, idxo], [energy], on_unused_input='ignore') #", "validation triples containing the 'relidx' predicate, and the corresponding target values valid_idxs =", "mean: %s, median: %s, hits@%s: %s%%' % ( round(dres['macrolmean'], 5), round(dres['macrolmedian'], 5), n,", "r) in zip(idxl, idxo, idxr) if o == relidx] for l, o, r", "the rank list of the rhs, over a list of lhs, rhs and", "fnsim(leftop(lhs, rell), tmp.reshape((1, tmp.shape[1]))) pen_simi = g[0, :].T * prior.P[idxo, 0].T + g[1,", "errr += [np.argsort(np.argsort((sr(l, o)[0]).flatten())[::-1]).flatten()[r] + 1] return errr # # COMPUTING PERFORMANCE METRICS", "tmp.shape[1])), rightop(rhs, relr)) # simi = fnsim(a, b) \"\"\" Theano function inputs. :input", "T.iscalar('idxl'), T.iscalar('idxo') # Graph lhs = (embedding.E[:, idxl]).reshape((1, embedding.D)) # lhs: 1xD vector", "%s, hits@%s: %s%%' % ( round(dres['macrogmean'], 5), round(dres['macrogmedian'], 5), n, round(dres['macroghits@n'], 3))) return", "matrix rell = (relationl.E[:, idxo]).reshape((1, relationl.D)) # rell: 1xD vector containing the embedding", "# Remove triples from true_triples from ranking results if true_triples is not None:", "<= n) * 100 dictrelgrn[i] = np.mean(np.asarray(dictrelres[i][0] + dictrelres[i][1]) <= n) * 100", "== True else 0. for comparison in comparisons] return ret def classify(energies, threshold):", "il if i in io] rmv_idx_r = [true_triples[i, 2] for i in inter_r", "operator. :param rightop: class for the 'right' operator. :param subtensorspec: only measure the", "= energyfn(r_validlidx, r_validridx, r_validoidx)[0] r_valid_cutpoint = find_classification_threshold(r_valid_energies, r_valid_targets) valid_matches += classification_matches(r_valid_energies, r_valid_targets, r_valid_cutpoint)", "in zip(idxl, idxo, idxr): errr += [np.argsort(np.argsort((sr(l, o)[0]).flatten())[::-1]).flatten()[r] + 1] return errr #", "1] return errr # # COMPUTING PERFORMANCE METRICS ON CLASSIFICATIONS # def classification_summary(energyfn,", "l).reshape(-1,) io = np.argwhere(true_triples[:, 1] == o).reshape(-1,) ir = np.argwhere(true_triples[:, 2] == r).reshape(-1,)", "o)[0]).flatten())[::-1]).flatten()[r] + 1] return errl, errr def FilteredRankingScoreIdx(sl, sr, idxl, idxr, idxo, true_triples):", "comparison in comparisons] return ret def classify(energies, threshold): classifications = np.asarray([1 if energy", "n, round(dres['microrhits@n'], 3))) logging.info('\\t-- global >> mean: %s, median: %s, hits@%s: %s%%' %", "indices. 
\"\"\" errl = [] errr = [] for l, o, r in", "o, gl)[0] srlo = sr(l, o, gr)[0] errl += [np.argsort(np.argsort((slro).flatten())[::-1]).flatten()[l] + 1] errr", "np.mean(resg)}) dres.update({'microgmedian': np.median(resg)}) dres.update({'microghits@n': np.mean(np.asarray(resg) <= n) * 100}) logging.info('### MICRO (%s):' %", "mean: %s, median: %s, hits@%s: %s%%' % ( round(dres['macrogmean'], 5), round(dres['macrogmedian'], 5), n,", "in enumerate(res[1]): dictrelres[idxo[i]][1] += [j] for i in listrel: dictrellmean[i] = np.mean(dictrelres[i][0]) dictrelrmean[i]", "= [i for i in ir if i in io] rmv_idx_l = [true_triples[i,", "dr_domain = set(dr_domain) dr_range = set(dr_range) test_triples = [(l, o, r) for (l,", "SCHEMA-AWARE RANKING FUNCTIONS # # # RANKING FUNCTIONS # def RankRightFnIdx_Schema(fnsim, embeddings, prior,", "rank list of the lhs and rhs, over a list of lhs, rhs", "+= [np.argsort(np.argsort(- scores_r)).flatten()[r] + 1] return errl, errr # # SCHEMA-AWARE RANKING FUNCTIONS", "o, r in zip(idxl, idxo, idxr): errl += [np.argsort(np.argsort((sl(r, o)[0]).flatten())[::-1]).flatten()[l] + 1] errr", "# relr: 1xD vector containing the embedding of idxo (relationr) tmp = leftop(lhs,", "prior.P[idxo, 1].T simi = simi - pen_simi return theano.function([idxr, idxo, g], [simi], on_unused_input='ignore')", "RankingScoreIdx_sub(sl, sr, idxl, idxr, idxo, selection=[]): \"\"\" Similar to RankingScoreIdx, but works on", "function computes the rank list of the rhs, over a list of lhs,", "errl += [np.argsort(np.argsort(-scores_l)).flatten()[l] + 1] inter_r = [i for i in il if", "only measure the similarity score for the entities corresponding to the first subtensorspec", "l, o, r in [(idxl[i], idxo[i], idxr[i]) for i in selection]: errl +=", "T.iscalar('idxr'), T.iscalar('idxo') # Graph if subtensorspec is not None: # We compute the", "round(dres['microlhits@n'], 3))) logging.info('\\t-- right >> mean: %s, median: %s, hits@%s: %s%%' % (", "gr = schemaPenalty.schema_penalties_lr_fast([l] * r_subtensorspec, range(r_subtensorspec), [o] * r_subtensorspec) slro = sl(r, o,", "idxo, relation2domainSet, relation2rangeSet, schemaPenalty, l_subtensorspec=None, r_subtensorspec=None): errl = [] errr = [] for", "score for the entities corresponding to the first subtensorspec (int) entities of the", "in zip(idxl, idxo, idxr): il=np.argwhere(true_triples[:,0]==l).reshape(-1,) io=np.argwhere(true_triples[:,1]==o).reshape(-1,) ir=np.argwhere(true_triples[:,2]==r).reshape(-1,) inter_l = [i for i in", "[true_triples[i, 2] for i in inter_r if true_triples[i, 2] != r] scores_r =", "Remove triples from true_triples from ranking results if true_triples is not None: il", "CLASSIFICATION FUNCTIONS # def EnergyFn(fnsim, embeddings, leftop, rightop): embedding, relationl, relationr = parse_embeddings(embeddings)", "# # CLASSIFICATION FUNCTIONS # def EnergyFn(fnsim, embeddings, leftop, rightop): embedding, relationl, relationr", "# Negative Energy simi = fnsim(tmp.reshape((1, tmp.shape[1])), rightop(rhs, relr)) # simi = fnsim(a,", "#@profile def RankingScoreIdx_Schema(sl, sr, idxl, idxr, idxo, relation2domainSet, relation2rangeSet, schemaPenalty, l_subtensorspec=None, r_subtensorspec=None): errl", "np.mean(np.asarray(dictrelres[i][1]) <= n) * 100 dictrelgrn[i] = np.mean(np.asarray(dictrelres[i][0] + dictrelres[i][1]) <= n) *", "RankRightFnIdx_Schema(fnsim, embeddings, prior, leftop, rightop, subtensorspec=None): embedding, relationl, relationr = parse_embeddings(embeddings) # Inputs", "couples of relation and 'right' entities 
(as index values). :param fnsim: similarity function", "rell(lhs) # b = relr(rhs) simi = fnsim(tmp.reshape((1, tmp.shape[1])), rightop(rhs, relr)) # simi", "find_classification_threshold(energies, targets): x = np.unique(np.sort(energies)) cutpoints = np.concatenate(([x[0]], (x[1:] + x[:-1]) / 2.,", "idxr, idxo, relation2domainSet, relation2rangeSet, schemaPenalty, l_subtensorspec=None, r_subtensorspec=None): errl = [] errr = []", "[np.mean(classification_matches(energies, targets, cutpoint)) * 100.0 for cutpoint in cutpoints] best_cutpoint = cutpoints[np.argmax(np.asarray(accuracies))] return", "1xD vector containing the embedding of idxl if subtensorspec is not None: #", "energy < threshold else 0 for energy in energies]) return classifications # #", "idxo, g], [simi], on_unused_input='ignore') #@profile def RankingScoreIdx_Schema(sl, sr, idxl, idxr, idxo, relation2domainSet, relation2rangeSet,", "relr) simi = fnsim(leftop(lhs, rell), tmp.reshape((1, tmp.shape[1]))) \"\"\" Theano function inputs. :input idxr:", "all 'left' entities given couples of relation and 'right' entities (as index values).", "score of all 'right' entities given couples of relation and 'left' entities (as", "= T.iscalar('idxl'), T.iscalar('idxo'), T.iscalar('idxr') lhs = (embedding.E[:, idxl]).reshape((1, embedding.D)) rhs = (embedding.E[:, idxr]).reshape((1,", "== r).reshape(-1,) inter_l = [i for i in ir if i in io]", "auc def auc_roc(predictions=[], labels=[]): '''Computes the Area Under the Receiver Operating Characteristic Curve", "1].T simi = simi - pen_simi return theano.function([idxl, idxo, g], [simi], on_unused_input='ignore') def", "np.asarray(predictions), np.asarray(labels) precision, recall, threshold = metrics.precision_recall_curve(labels, predictions) auc = metrics.auc(recall, precision) return", "% ( round(dres['macrormean'], 5), round(dres['macrormedian'], 5), n, round(dres['macrorhits@n'], 3))) logging.info('\\t-- global >> mean:", "idxo = T.iscalar('idxr'), T.iscalar('idxo') # Graph if subtensorspec is not None: # We", "gl)[0] srlo = sr(l, o, gr)[0] inter_l = [i for i in ir", "cutpoints] best_cutpoint = cutpoints[np.argmax(np.asarray(accuracies))] return best_cutpoint def classification_matches(energies, targets, threshold): classifications = classify(energies,", "function returns a Theano function to measure the similarity score of all 'left'", "logging.info('\\t-- right >> mean: %s, median: %s, hits@%s: %s%%' % ( round(dres['micrormean'], 5),", "[energy], on_unused_input='ignore') def EnergyVecFn(fnsim, embeddings, leftop, rightop): embedding, relationl, relationr = parse_embeddings(embeddings) idxl,", "sr(l, o, gr)[0] inter_l = [i for i in ir if i in", "mean: %s, median: %s, hits@%s: %s%%' % ( round(dres['micrormean'], 5), round(dres['micrormedian'], 5), n,", "Theano function output. :output simi: vector of score values. 
\"\"\" return theano.function([idxr, idxo],", "# def RankRightFnIdx(fnsim, embeddings, leftop, rightop, subtensorspec=None): \"\"\" This function returns a Theano", "valid_matches, test_matches = [], [] # Iterate over unique relation indexes for relidx", "= {} for i in listrel: dictrelres.update({i: [[], []]}) for i, j in", "triples containing the 'relidx' predicate, and the corresponding target values test_idxs = np.where(testoidx", "r_testoidx)[0] test_matches += classification_matches(r_test_energies, r_test_targets, r_valid_cutpoint) logging.info('Validation Accuracy: %s -- Test Accuracy: %s'", "2] for i in inter_r if true_triples[i, 2] != r] scores_l = (sl(r,", "precision) return auc def auc_roc(predictions=[], labels=[]): '''Computes the Area Under the Receiver Operating", "+ 1] errr += [np.argsort(np.argsort((srlo).flatten())[::-1]).flatten()[r] + 1] return errl, errr def FilteredRankingScoreIdx_Schema(sl, sr,", "lhs = (embedding.E[:, idxl]).reshape((1, embedding.D)) rhs = (embedding.E[:, idxr]).reshape((1, embedding.D)) rell = (relationl.E[:,", ":].T * prior.P[idxo, 0].T + g[1, :].T * prior.P[idxo, 1].T simi = simi", "hits@%s: %s%%' % ( round(dres['macrormean'], 5), round(dres['macrormedian'], 5), n, round(dres['macrorhits@n'], 3))) logging.info('\\t-- global", "sr, idxl, idxr, idxo, selection=[]): \"\"\" Similar to RankingScoreIdx, but works on a", "auc_roc(predictions=[], labels=[]): '''Computes the Area Under the Receiver Operating Characteristic Curve (AUC-ROC)''' predictions,", "relationl, relationr = parse_embeddings(embeddings) # Inputs idxl, idxo = T.iscalar('idxl'), T.iscalar('idxo') # Graph", "r_valid_cutpoint = find_classification_threshold(r_valid_energies, r_valid_targets) valid_matches += classification_matches(r_valid_energies, r_valid_targets, r_valid_cutpoint) # Select the test", "testoidx[test_idxs] r_test_targets = test_targets[test_idxs] r_test_energies = energyfn(r_testlidx, r_testridx, r_testoidx)[0] test_matches += classification_matches(r_test_energies, r_test_targets,", "true_triples[i,2] != r] scores_r = (sr(l, o)[0]).flatten() scores_r[rmv_idx_r] = -np.inf errr += [np.argsort(np.argsort(-scores_r)).flatten()[r]", "100.0 for cutpoint in cutpoints] best_cutpoint = cutpoints[np.argmax(np.asarray(accuracies))] return best_cutpoint def classification_matches(energies, targets,", "idxl, idxr, idxo, true_triples, relation2domainSet, relation2rangeSet, schemaPenalty, l_subtensorspec=None, r_subtensorspec=None): errl = [] errr", "idxr, idxo, rel2domain, rel2range, illegal_dr_penalty=1e6, true_triples=None): errl, errr = [], [] relidxs =", "return dres # # RANKING FUNCTIONS # def RankRightFnIdx(fnsim, embeddings, leftop, rightop, subtensorspec=None):", "on_unused_input='ignore') def RankingScoreIdx(sl, sr, idxl, idxr, idxo): \"\"\" This function computes the rank", "[i for i in ir if i in io] rmv_idx_l = [true_triples[i, 0]", "of all 'right' entities given couples of relation and 'left' entities (as index", "inter_r if true_triples[i,2] != r] scores_r = (sr(l, o)[0]).flatten() scores_r[rmv_idx_r] = -np.inf errr", "and rel indexes. :param sr: Theano function created with RankRightFnIdx(). 
:param idxl: list", "dictrelrmean[i] = np.mean(dictrelres[i][1]) dictrelgmean[i] = np.mean(dictrelres[i][0] + dictrelres[i][1]) dictrellmedian[i] = np.median(dictrelres[i][0]) dictrelrmedian[i] =", "measure the similarity score of all 'right' entities given couples of relation and", "= np.mean(np.asarray(dictrelres[i][0] + dictrelres[i][1]) <= n) * 100 dres.update({'dictrelres': dictrelres}) dres.update({'dictrellmean': dictrellmean}) dres.update({'dictrelrmean':", "classification_matches(r_valid_energies, r_valid_targets, r_valid_cutpoint) # Select the test triples containing the 'relidx' predicate, and", "# We compute the score only for a subset of entities rhs =", "EnergyVecFn(fnsim, embeddings, leftop, rightop): embedding, relationl, relationr = parse_embeddings(embeddings) idxl, idxo, idxr =", "{} dictrelgmedian = {} dictrellrn = {} dictrelrrn = {} dictrelgrn = {}", "in il if i in io] rmv_idx_r = [true_triples[i,2] for i in inter_r", "enumerate(res[1]): dictrelres[idxo[i]][1] += [j] for i in listrel: dictrellmean[i] = np.mean(dictrelres[i][0]) dictrelrmean[i] =", "subset of entities rhs = (embedding.E[:, :subtensorspec]).T else: rhs = embedding.E.T # rhs:", "3))) logging.info('\\t-- global >> mean: %s, median: %s, hits@%s: %s%%' % ( round(dres['microgmean'],", "r in zip(idxl, idxo, idxr): il=np.argwhere(true_triples[:,0]==l).reshape(-1,) io=np.argwhere(true_triples[:,1]==o).reshape(-1,) ir=np.argwhere(true_triples[:,2]==r).reshape(-1,) gl = schemaPenalty.schema_penalties_lr_fast(range(l_subtensorspec), [r] *", "in ir if i in io] rmv_idx_l = [true_triples[i,0] for i in inter_l", "rhs = embedding.E[:, idxl].T, embedding.E[:, idxr].T rell, relr = relationl.E[:, idxo].T, relationr.E[:, idxo].T", "i in io] rmv_idx_l += [true_triples[i, 0] for i in inter_l if true_triples[i,", "entities lhs = (embedding.E[:, :subtensorspec]).T else: lhs = embedding.E.T rhs = (embedding.E[:, idxr]).reshape((1,", "dictrellmedian[i] = np.median(dictrelres[i][0]) dictrelrmedian[i] = np.median(dictrelres[i][1]) dictrelgmedian[i] = np.median(dictrelres[i][0] + dictrelres[i][1]) dictrellrn[i] =", "# RANKING FUNCTIONS # def RankRightFnIdx_Schema(fnsim, embeddings, prior, leftop, rightop, subtensorspec=None): embedding, relationl,", "np.mean(dictrelrmean.values())}) dres.update({'macrormedian': np.mean(dictrelrmedian.values())}) dres.update({'macrorhits@n': np.mean(dictrelrrn.values())}) dres.update({'macrogmean': np.mean(dictrelgmean.values())}) dres.update({'macrogmedian': np.mean(dictrelgmedian.values())}) dres.update({'macroghits@n': np.mean(dictrelgrn.values())}) logging.info('### MACRO", "score only for a subset of entities lhs = (embedding.E[:, :subtensorspec]).T else: lhs", "idxo: index value of the relation member. Theano function output. 
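# Illustrative usage sketch (not part of the original module): `res` is the pair
# of 1-based rank lists produced by the Ranking*Idx helpers below, and `idxo`
# the relation index of each test triple.  The rank values here are made up.
def _example_ranking_summary():
    logging.basicConfig(level=logging.INFO)
    errl = [1, 3, 120, 7]    # ranks of the correct 'left' entity
    errr = [2, 2, 15, 400]   # ranks of the correct 'right' entity
    idxo = [0, 0, 1, 1]      # relation index of each test triple
    # Logs micro/macro mean rank, median rank and hits@10, and returns them as a dict
    return ranking_summary([errl, errr], idxo=idxo, n=10, tag='toy')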
#
# RANKING FUNCTIONS
#
def RankRightFnIdx(fnsim, embeddings, leftop, rightop, subtensorspec=None):
    """
    This function returns a Theano function to measure the similarity score of all
    'right' entities given couples of relation and 'left' entities (as index values).

    :param fnsim: similarity function (on Theano variables).
    :param embeddings: an Embeddings instance.
    :param leftop: class for the 'left' operator.
    :param rightop: class for the 'right' operator.
    :param subtensorspec: only measure the similarity score for the entities
                          corresponding to the first subtensorspec (int) entities of
                          the embedding matrix (default None: all entities).
    """
    embedding, relationl, relationr = parse_embeddings(embeddings)

    # Inputs
    idxl, idxo = T.iscalar('idxl'), T.iscalar('idxo')
    # Graph
    lhs = (embedding.E[:, idxl]).reshape((1, embedding.D))
    # lhs: 1xD vector containing the embedding of idxl
    if subtensorspec is not None:
        # We compute the score only for a subset of entities
        rhs = (embedding.E[:, :subtensorspec]).T
    else:
        rhs = embedding.E.T
    # rhs: NxD embedding matrix
    rell = (relationl.E[:, idxo]).reshape((1, relationl.D))
    # rell: 1xD vector containing the embedding of idxo (relationl)
    relr = (relationr.E[:, idxo]).reshape((1, relationr.D))
    # relr: 1xD vector containing the embedding of idxo (relationr)
    tmp = leftop(lhs, rell)
    # simi = fnsim(a, b), with a = leftop(lhs, rell) and b = rightop(rhs, relr)
    simi = fnsim(tmp.reshape((1, tmp.shape[1])), rightop(rhs, relr))
    """
    Theano function inputs.
    :input idxl: index value of the 'left' member.
    :input idxo: index value of the relation member.

    Theano function output.
    :output simi: vector of score values.
    """
    return theano.function([idxl, idxo], [simi], on_unused_input='ignore')


def RankLeftFnIdx(fnsim, embeddings, leftop, rightop, subtensorspec=None):
    """
    This function returns a Theano function to measure the similarity score of all
    'left' entities given couples of relation and 'right' entities (as index values).

    :param fnsim: similarity function (on Theano variables).
    :param embeddings: an Embeddings instance.
    :param leftop: class for the 'left' operator.
    :param rightop: class for the 'right' operator.
    :param subtensorspec: only measure the similarity score for the entities
                          corresponding to the first subtensorspec (int) entities of
                          the embedding matrix (default None: all entities).
    """
    embedding, relationl, relationr = parse_embeddings(embeddings)

    # Inputs
    idxr, idxo = T.iscalar('idxr'), T.iscalar('idxo')
    # Graph
    if subtensorspec is not None:
        # We compute the score only for a subset of entities
        lhs = (embedding.E[:, :subtensorspec]).T
    else:
        lhs = embedding.E.T
    rhs = (embedding.E[:, idxr]).reshape((1, embedding.D))
    rell = (relationl.E[:, idxo]).reshape((1, relationl.D))
    relr = (relationr.E[:, idxo]).reshape((1, relationr.D))
    tmp = rightop(rhs, relr)
    simi = fnsim(leftop(lhs, rell), tmp.reshape((1, tmp.shape[1])))
    """
    Theano function inputs.
    :input idxr: index value of the 'right' member.
    :input idxo: index value of the relation member.

    Theano function output.
    :output simi: vector of score values.
    """
    return theano.function([idxr, idxo], [simi], on_unused_input='ignore')
def RankingScoreIdx(sl, sr, idxl, idxr, idxo):
    """
    This function computes the rank list of the lhs and rhs, over a list of lhs, rhs
    and rel indexes.

    :param sl: Theano function created with RankLeftFnIdx().
    :param sr: Theano function created with RankRightFnIdx().
    :param idxl: list of 'left' indices.
    :param idxr: list of 'right' indices.
    :param idxo: list of relation indices.
    """
    errl = []
    errr = []
    for l, o, r in zip(idxl, idxo, idxr):
        errl += [np.argsort(np.argsort((sl(r, o)[0]).flatten())[::-1]).flatten()[l] + 1]
        errr += [np.argsort(np.argsort((sr(l, o)[0]).flatten())[::-1]).flatten()[r] + 1]
    return errl, errr


def RankingScoreIdx_sub(sl, sr, idxl, idxr, idxo, selection=[]):
    """
    Similar to RankingScoreIdx, but works on a subset of examples, defined in the
    'selection' parameter.
    """
    errl, errr = [], []
    for l, o, r in [(idxl[i], idxo[i], idxr[i]) for i in selection]:
        errl += [np.argsort(np.argsort((sl(r, o)[0]).flatten())[::-1]).flatten()[l] + 1]
        errr += [np.argsort(np.argsort((sr(l, o)[0]).flatten())[::-1]).flatten()[r] + 1]
    return errl, errr


def FilteredRankingScoreIdx(sl, sr, idxl, idxr, idxo, true_triples):
    """
    This function computes the rank list of the lhs and rhs, over a list of lhs, rhs
    and rel indexes, in the 'filtered' setting: corrupted triples that appear in
    true_triples are removed from the candidate lists before ranking.

    :param sl: Theano function created with RankLeftFnIdx().
    :param sr: Theano function created with RankRightFnIdx().
    :param idxl: list of 'left' indices.
    :param idxr: list of 'right' indices.
    :param idxo: list of relation indices.
    :param true_triples: array of all known true (left, relation, right) triples.
    """
    errl = []
    errr = []
    for l, o, r in zip(idxl, idxo, idxr):
        il = np.argwhere(true_triples[:, 0] == l).reshape(-1,)
        io = np.argwhere(true_triples[:, 1] == o).reshape(-1,)
        ir = np.argwhere(true_triples[:, 2] == r).reshape(-1,)

        inter_l = [i for i in ir if i in io]
        rmv_idx_l = [true_triples[i, 0] for i in inter_l if true_triples[i, 0] != l]
        scores_l = (sl(r, o)[0]).flatten()
        scores_l[rmv_idx_l] = -np.inf
        errl += [np.argsort(np.argsort(-scores_l)).flatten()[l] + 1]

        inter_r = [i for i in il if i in io]
        rmv_idx_r = [true_triples[i, 2] for i in inter_r if true_triples[i, 2] != r]
        scores_r = (sr(l, o)[0]).flatten()
        scores_r[rmv_idx_r] = -np.inf
        errr += [np.argsort(np.argsort(-scores_r)).flatten()[r] + 1]
    return errl, errr


def RankingScoreRightIdx(sr, idxl, idxr, idxo):
    """
    This function computes the rank list of the rhs, over a list of lhs, rhs and rel
    indexes.

    :param sr: Theano function created with RankRightFnIdx().
    :param idxl: list of 'left' indices.
    :param idxr: list of 'right' indices.
    :param idxo: list of relation indices.
    """
    errr = []
    for l, o, r in zip(idxl, idxo, idxr):
        errr += [np.argsort(np.argsort((sr(l, o)[0]).flatten())[::-1]).flatten()[r] + 1]
    return errr
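# Illustrative sketch (not part of the original module): the double-argsort idiom
# used above turns a score vector into 1-based ranks under a descending sort.
# Runs with NumPy alone; the scores below are made up.
def _example_rank_idiom():
    scores = np.asarray([0.1, 0.9, 0.4, 0.7])         # higher score = better
    ranks = np.argsort(np.argsort(scores)[::-1]) + 1  # -> array([4, 1, 3, 2])
    # Entity 1 has the highest score, hence rank 1; entity 0 the lowest, rank 4.
    return ranks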
#
# COMPUTING PERFORMANCE METRICS ON CLASSIFICATIONS
#
def classification_summary(energyfn, validlidx, validridx, validoidx, valid_targets,
                           testlidx, testridx, testoidx, test_targets):
    # Find unique relation indexes
    relidxs = np.unique(validoidx)
    valid_matches, test_matches = [], []
    # Iterate over unique relation indexes
    for relidx in relidxs:
        # Select the validation triples containing the 'relidx' predicate,
        # and the corresponding target values
        valid_idxs = np.where(validoidx == relidx)
        r_validlidx, r_validridx, r_validoidx = validlidx[valid_idxs], validridx[valid_idxs], validoidx[valid_idxs]
        r_valid_targets = valid_targets[valid_idxs]
        # Evaluate the energies of those triples
        r_valid_energies = energyfn(r_validlidx, r_validridx, r_validoidx)[0]
        r_valid_cutpoint = find_classification_threshold(r_valid_energies, r_valid_targets)
        valid_matches += classification_matches(r_valid_energies, r_valid_targets, r_valid_cutpoint)

        # Select the test triples containing the 'relidx' predicate,
        # and the corresponding target values
        test_idxs = np.where(testoidx == relidx)
        r_testlidx, r_testridx, r_testoidx = testlidx[test_idxs], testridx[test_idxs], testoidx[test_idxs]
        r_test_targets = test_targets[test_idxs]
        r_test_energies = energyfn(r_testlidx, r_testridx, r_testoidx)[0]
        test_matches += classification_matches(r_test_energies, r_test_targets, r_valid_cutpoint)

    logging.info('Validation Accuracy: %s -- Test Accuracy: %s'
                 % ((np.mean(valid_matches) * 100.0), (np.mean(test_matches) * 100.0)))


def find_classification_threshold(energies, targets):
    x = np.unique(np.sort(energies))
    # Candidate cut points: the extremes and the midpoints between consecutive energies
    cutpoints = np.concatenate(([x[0]], (x[1:] + x[:-1]) / 2., [x[-1]]))
    accuracies = [np.mean(classification_matches(energies, targets, cutpoint)) * 100.0
                  for cutpoint in cutpoints]
    best_cutpoint = cutpoints[np.argmax(np.asarray(accuracies))]
    return best_cutpoint


def classification_matches(energies, targets, threshold):
    classifications = classify(energies, threshold)
    comparisons = (targets == classifications)
    ret = [1. if comparison == True else 0. for comparison in comparisons]
    return ret


def classify(energies, threshold):
    classifications = np.asarray([1 if energy < threshold else 0 for energy in energies])
    return classifications
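# Illustrative sketch (not part of the original module): picking a per-relation
# energy threshold on synthetic data.  True triples (target 1) are assumed to get
# low energies; the numbers below are made up and the snippet runs with NumPy alone.
def _example_classification_threshold():
    energies = np.asarray([0.1, 0.2, 0.35, 0.9, 1.2, 1.5])
    targets = np.asarray([1, 1, 1, 0, 0, 0])
    cutpoint = find_classification_threshold(energies, targets)
    accuracy = np.mean(classification_matches(energies, targets, cutpoint)) * 100.0
    return cutpoint, accuracy  # a cut point around 0.625 separates the two groups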
#
# CLASSIFICATION FUNCTIONS
#
def EnergyFn(fnsim, embeddings, leftop, rightop):
    embedding, relationl, relationr = parse_embeddings(embeddings)
    idxl, idxo, idxr = T.iscalar('idxl'), T.iscalar('idxo'), T.iscalar('idxr')
    lhs = (embedding.E[:, idxl]).reshape((1, embedding.D))
    rhs = (embedding.E[:, idxr]).reshape((1, embedding.D))
    rell = (relationl.E[:, idxo]).reshape((1, relationl.D))
    relr = (relationr.E[:, idxo]).reshape((1, relationr.D))
    energy = - fnsim(leftop(lhs, rell), rightop(rhs, relr))
    return theano.function([idxl, idxr, idxo], [energy], on_unused_input='ignore')


def EnergyVecFn(fnsim, embeddings, leftop, rightop):
    embedding, relationl, relationr = parse_embeddings(embeddings)
    idxl, idxo, idxr = T.ivector('idxl'), T.ivector('idxo'), T.ivector('idxr')
    lhs, rhs = embedding.E[:, idxl].T, embedding.E[:, idxr].T
    rell, relr = relationl.E[:, idxo].T, relationr.E[:, idxo].T
    energy = - fnsim(leftop(lhs, rell), rightop(rhs, relr))
    return theano.function([idxl, idxr, idxo], [energy], on_unused_input='ignore')
#
# LEVERAGING RANGE AND DOMAIN RELATIONS DURING LEARNING
#
def FilteredRankingScoreIdx_DR(sl, sr, idxl, idxr, idxo, rel2domain, rel2range,
                               illegal_dr_penalty=1e6, true_triples=None):
    errl, errr = [], []
    relidxs = np.unique(idxo)
    for relidx in relidxs:
        dr_domain, dr_range = rel2domain[relidx], rel2range[relidx]
        dr_domain = set(dr_domain)
        dr_range = set(dr_range)
        test_triples = [(l, o, r) for (l, o, r) in zip(idxl, idxo, idxr) if o == relidx]
        for l, o, r in test_triples:
            rmv_idx_l, rmv_idx_r = [], []
            # Remove triples from true_triples from ranking results
            if true_triples is not None:
                il = np.argwhere(true_triples[:, 0] == l).reshape(-1,)
                io = np.argwhere(true_triples[:, 1] == o).reshape(-1,)
                ir = np.argwhere(true_triples[:, 2] == r).reshape(-1,)
                inter_l = [i for i in ir if i in io]
                rmv_idx_l += [true_triples[i, 0] for i in inter_l if true_triples[i, 0] != l]
                inter_r = [i for i in il if i in io]
                rmv_idx_r += [true_triples[i, 2] for i in inter_r if true_triples[i, 2] != r]
            scores_l = (sl(r, o)[0]).flatten()
            scores_r = (sr(l, o)[0]).flatten()
            # Remove triples not in domain and range from ranking results
            pen_idx_l = [cl for cl in range(len(scores_l)) if cl not in dr_domain]
            pen_idx_r = [cr for cr in range(len(scores_r)) if cr not in dr_range]
            scores_l[rmv_idx_l] = -np.inf
            scores_r[rmv_idx_r] = -np.inf
            scores_l[pen_idx_l] -= illegal_dr_penalty
            scores_r[pen_idx_r] -= illegal_dr_penalty
            errl += [np.argsort(np.argsort(- scores_l)).flatten()[l] + 1]
            errr += [np.argsort(np.argsort(- scores_r)).flatten()[r] + 1]
    return errl, errr


#
# SCHEMA-AWARE RANKING FUNCTIONS
#

#
# RANKING FUNCTIONS
#
def RankRightFnIdx_Schema(fnsim, embeddings, prior, leftop, rightop, subtensorspec=None):
    embedding, relationl, relationr = parse_embeddings(embeddings)
    # Inputs
    idxl, idxo = T.iscalar('idxl'), T.iscalar('idxo')
    g = T.matrix('g')
    # Graph
    lhs = (embedding.E[:, idxl]).reshape((1, embedding.D))
    # lhs: 1xD vector containing the embedding of idxl
    if subtensorspec is not None:
        # We compute the score only for a subset of entities
        rhs = (embedding.E[:, :subtensorspec]).T
    else:
        rhs = embedding.E.T
    # rhs: NxD embedding matrix
    rell = (relationl.E[:, idxo]).reshape((1, relationl.D))
    relr = (relationr.E[:, idxo]).reshape((1, relationr.D))
    tmp = leftop(lhs, rell)
    simi = fnsim(tmp.reshape((1, tmp.shape[1])), rightop(rhs, relr))
    # Schema-based penalty, weighted by the relation-specific prior
    pen_simi = g[0, :].T * prior.P[idxo, 0].T + g[1, :].T * prior.P[idxo, 1].T
    simi = simi - pen_simi
    return theano.function([idxl, idxo, g], [simi], on_unused_input='ignore')


def RankLeftFnIdx_Schema(fnsim, embeddings, prior, leftop, rightop, subtensorspec=None):
    embedding, relationl, relationr = parse_embeddings(embeddings)
    # Inputs
    idxr, idxo = T.iscalar('idxr'), T.iscalar('idxo')
    g = T.matrix('g')
    # Graph
    if subtensorspec is not None:
        # We compute the score only for a subset of entities
        lhs = (embedding.E[:, :subtensorspec]).T
    else:
        lhs = embedding.E.T
    rhs = (embedding.E[:, idxr]).reshape((1, embedding.D))
    rell = (relationl.E[:, idxo]).reshape((1, relationl.D))
    relr = (relationr.E[:, idxo]).reshape((1, relationr.D))
    tmp = rightop(rhs, relr)
    simi = fnsim(leftop(lhs, rell), tmp.reshape((1, tmp.shape[1])))
    # Schema-based penalty, weighted by the relation-specific prior
    pen_simi = g[0, :].T * prior.P[idxo, 0].T + g[1, :].T * prior.P[idxo, 1].T
    simi = simi - pen_simi
    return theano.function([idxr, idxo, g], [simi], on_unused_input='ignore')


#@profile
def RankingScoreIdx_Schema(sl, sr, idxl, idxr, idxo, relation2domainSet, relation2rangeSet,
                           schemaPenalty, l_subtensorspec=None, r_subtensorspec=None):
    errl = []
    errr = []
    for l, o, r in zip(idxl, idxo, idxr):
        gl = schemaPenalty.schema_penalties_lr_fast(range(l_subtensorspec), [r] * l_subtensorspec, [o] * l_subtensorspec)
        gr = schemaPenalty.schema_penalties_lr_fast([l] * r_subtensorspec, range(r_subtensorspec), [o] * r_subtensorspec)
        slro = sl(r, o, gl)[0]
        srlo = sr(l, o, gr)[0]
        errl += [np.argsort(np.argsort((slro).flatten())[::-1]).flatten()[l] + 1]
        errr += [np.argsort(np.argsort((srlo).flatten())[::-1]).flatten()[r] + 1]
    return errl, errr


def FilteredRankingScoreIdx_Schema(sl, sr, idxl, idxr, idxo, true_triples, relation2domainSet,
                                   relation2rangeSet, schemaPenalty, l_subtensorspec=None, r_subtensorspec=None):
    errl = []
    errr = []
    for l, o, r in zip(idxl, idxo, idxr):
        il = np.argwhere(true_triples[:, 0] == l).reshape(-1,)
        io = np.argwhere(true_triples[:, 1] == o).reshape(-1,)
        ir = np.argwhere(true_triples[:, 2] == r).reshape(-1,)

        gl = schemaPenalty.schema_penalties_lr_fast(range(l_subtensorspec), [r] * l_subtensorspec, [o] * l_subtensorspec)
        gr = schemaPenalty.schema_penalties_lr_fast([l] * r_subtensorspec, range(r_subtensorspec), [o] * r_subtensorspec)
        slro = sl(r, o, gl)[0]
        srlo = sr(l, o, gr)[0]

        inter_l = [i for i in ir if i in io]
        rmv_idx_l = [true_triples[i, 0] for i in inter_l if true_triples[i, 0] != l]
        scores_l = (slro).flatten()
        scores_l[rmv_idx_l] = -np.inf
        errl += [np.argsort(np.argsort(-scores_l)).flatten()[l] + 1]

        inter_r = [i for i in il if i in io]
        rmv_idx_r = [true_triples[i, 2] for i in inter_r if true_triples[i, 2] != r]
        scores_r = (srlo).flatten()
        scores_r[rmv_idx_r] = -np.inf
        errr += [np.argsort(np.argsort(-scores_r)).flatten()[r] + 1]
    return errl, errr
:output simi:", "dres.update({'dictrellmedian': dictrellmedian}) dres.update({'dictrelrmedian': dictrelrmedian}) dres.update({'dictrelgmedian': dictrelgmedian}) dres.update({'dictrellrn': dictrellrn}) dres.update({'dictrelrrn': dictrelrrn}) dres.update({'dictrelgrn': dictrelgrn}) dres.update({'macrolmean':", "= {} dictrelgmedian = {} dictrellrn = {} dictrelrrn = {} dictrelgrn =", "1] return errl, errr def RankingScoreIdx_sub(sl, sr, idxl, idxr, idxo, selection=[]): \"\"\" Similar", "# b = relr(rhs) # Negative Energy simi = fnsim(tmp.reshape((1, tmp.shape[1])), rightop(rhs, relr))", "np.median(dictrelres[i][0] + dictrelres[i][1]) dictrellrn[i] = np.mean(np.asarray(dictrelres[i][0]) <= n) * 100 dictrelrrn[i] = np.mean(np.asarray(dictrelres[i][1])", "subtensorspec=None): embedding, relationl, relationr = parse_embeddings(embeddings) # Inputs idxl, idxo = T.iscalar('idxl'), T.iscalar('idxo')", "l_subtensorspec=None, r_subtensorspec=None): errl = [] errr = [] for l, o, r in", "%s%%' % ( round(dres['micrormean'], 5), round(dres['micrormedian'], 5), n, round(dres['microrhits@n'], 3))) logging.info('\\t-- global >>", "<= n) * 100 dictrelrrn[i] = np.mean(np.asarray(dictrelres[i][1]) <= n) * 100 dictrelgrn[i] =", "selection=[]): \"\"\" Similar to RankingScoreIdx, but works on a subset of examples, defined", "round(dres['macrogmedian'], 5), n, round(dres['macroghits@n'], 3))) return dres # # RANKING FUNCTIONS # def", "= sl(r, o, gl)[0] srlo = sr(l, o, gr)[0] errl += [np.argsort(np.argsort((slro).flatten())[::-1]).flatten()[l] +", "%s, median: %s, hits@%s: %s%%' % ( round(dres['microlmean'], 5), round(dres['microlmedian'], 5), n, round(dres['microlhits@n'],", "%s -- Test Accuracy: %s' % ((np.mean(valid_matches) * 100.0), (np.mean(test_matches) * 100.0))) def", "+ x[:-1]) / 2., [x[-1]])) accuracies = [np.mean(classification_matches(energies, targets, cutpoint)) * 100.0 for", "classifications) ret = [1. if comparison == True else 0. 
for comparison in", "r in zip(idxl, idxo, idxr): errl += [np.argsort(np.argsort((sl(r, o)[0]).flatten())[::-1]).flatten()[l] + 1] errr +=", "r_validoidx = validlidx[valid_idxs], validridx[valid_idxs], validoidx[valid_idxs] r_valid_targets = valid_targets[valid_idxs] # Evaluate the energies of", "to the first subtensorspec (int) entities of the embedding matrix (default None: all", "errl, errr = [], [] for l, o, r in [(idxl[i], idxo[i], idxr[i])", "idxr, idxo): \"\"\" This function computes the rank list of the lhs and", "all 'right' entities given couples of relation and 'left' entities (as index values).", "values test_idxs = np.where(testoidx == relidx) r_testlidx, r_testridx, r_testoidx = testlidx[test_idxs], testridx[test_idxs], testoidx[test_idxs]", "RANKING FUNCTIONS # # # RANKING FUNCTIONS # def RankRightFnIdx_Schema(fnsim, embeddings, prior, leftop,", "* 100.0 for cutpoint in cutpoints] best_cutpoint = cutpoints[np.argmax(np.asarray(accuracies))] return best_cutpoint def classification_matches(energies,", "= (relationr.E[:, idxo]).reshape((1, relationr.D)) tmp = rightop(rhs, relr) simi = fnsim(leftop(lhs, rell), tmp.reshape((1,", "[] errr = [] for l, o, r in zip(idxl, idxo, idxr): il=np.argwhere(true_triples[:,0]==l).reshape(-1,)", "dres.update({'macrolmean': np.mean(dictrellmean.values())}) dres.update({'macrolmedian': np.mean(dictrellmedian.values())}) dres.update({'macrolhits@n': np.mean(dictrellrn.values())}) dres.update({'macrormean': np.mean(dictrelrmean.values())}) dres.update({'macrormedian': np.mean(dictrelrmedian.values())}) dres.update({'macrorhits@n': np.mean(dictrelrrn.values())}) dres.update({'macrogmean':", "coding: utf-8 -*- import numpy as np import theano import theano.tensor as T", "for i in il if i in io] rmv_idx_r = [true_triples[i, 2] for", "(as index values). :param fnsim: similarity function (on Theano variables). :param embeddings: an", "testridx, testoidx, test_targets): # Find unique relation indexes relidxs = np.unique(validoidx) valid_matches, test_matches", "inter_r = [i for i in il if i in io] rmv_idx_r +=", "errl, errr = [], [] relidxs = np.unique(idxo) for relidx in relidxs: dr_domain,", "5), round(dres['macrogmedian'], 5), n, round(dres['macroghits@n'], 3))) return dres # # RANKING FUNCTIONS #", "= [true_triples[i,0] for i in inter_l if true_triples[i,0] != l] scores_l = (sl(r,", "= [i for i in il if i in io] rmv_idx_r = [true_triples[i,2]", "errl += [np.argsort(np.argsort((sl(r, o)[0]).flatten())[::-1]).flatten()[l] + 1] errr += [np.argsort(np.argsort((sr(l, o)[0]).flatten())[::-1]).flatten()[r] + 1] return", "PERFORMANCE METRICS ON CLASSIFICATIONS # def classification_summary(energyfn, validlidx, validridx, validoidx, valid_targets, testlidx, testridx,", "indexes for relidx in relidxs: # Select the validation triples containing the 'relidx'", "FUNCTIONS # # # RANKING FUNCTIONS # def RankRightFnIdx_Schema(fnsim, embeddings, prior, leftop, rightop,", "simi - pen_simi return theano.function([idxr, idxo, g], [simi], on_unused_input='ignore') #@profile def RankingScoreIdx_Schema(sl, sr,", "vector containing the embedding of idxl if subtensorspec is not None: # We", "idxl: list of 'left' indices. :param idxr: list of 'right' indices. 
:param idxo:", "idxo, true_triples, relation2domainSet, relation2rangeSet, schemaPenalty, l_subtensorspec=None, r_subtensorspec=None): errl = [] errr = []", "i, j in enumerate(res[0]): dictrelres[idxo[i]][0] += [j] for i, j in enumerate(res[1]): dictrelres[idxo[i]][1]", "i in io] rmv_idx_l = [true_triples[i, 0] for i in inter_l if true_triples[i,", "dictrelrmedian}) dres.update({'dictrelgmedian': dictrelgmedian}) dres.update({'dictrellrn': dictrellrn}) dres.update({'dictrelrrn': dictrelrrn}) dres.update({'dictrelgrn': dictrelgrn}) dres.update({'macrolmean': np.mean(dictrellmean.values())}) dres.update({'macrolmedian': np.mean(dictrellmedian.values())})", "= np.asarray([1 if energy < threshold else 0 for energy in energies]) return", "embeddings, leftop, rightop, subtensorspec=None): \"\"\" This function returns a Theano function to measure", "rell = (relationl.E[:, idxo]).reshape((1, relationl.D)) relr = (relationr.E[:, idxo]).reshape((1, relationr.D)) energy = -", "lhs, rhs = embedding.E[:, idxl].T, embedding.E[:, idxr].T rell, relr = relationl.E[:, idxo].T, relationr.E[:,", "rightop: class for the 'right' operator. :param subtensorspec: only measure the similarity score", "labels = np.asarray(predictions), np.asarray(labels) precision, recall, threshold = metrics.precision_recall_curve(labels, predictions) auc = metrics.auc(recall,", "[x[-1]])) accuracies = [np.mean(classification_matches(energies, targets, cutpoint)) * 100.0 for cutpoint in cutpoints] best_cutpoint", "comparisons = (targets == classifications) ret = [1. if comparison == True else", "'''Computes the Area Under the Receiver Operating Characteristic Curve (AUC-ROC)''' predictions, labels =", "embeddings, prior, leftop, rightop, subtensorspec=None): embedding, relationl, relationr = parse_embeddings(embeddings) # Inputs idxr,", "relidxs = np.unique(validoidx) valid_matches, test_matches = [], [] # Iterate over unique relation", "in io] rmv_idx_l += [true_triples[i, 0] for i in inter_l if true_triples[i, 0]", "validlidx, validridx, validoidx, valid_targets, testlidx, testridx, testoidx, test_targets): # Find unique relation indexes", "slro = sl(r, o, gl)[0] srlo = sr(l, o, gr)[0] errl += [np.argsort(np.argsort((slro).flatten())[::-1]).flatten()[l]", "[] for l, o, r in zip(idxl, idxo, idxr): errr += [np.argsort(np.argsort((sr(l, o)[0]).flatten())[::-1]).flatten()[r]", "dictrelrmean = {} dictrelgmean = {} dictrellmedian = {} dictrelrmedian = {} dictrelgmedian", "3))) if idxo is not None: listrel = set(idxo) dictrelres = {} dictrellmean", "right >> mean: %s, median: %s, hits@%s: %s%%' % ( round(dres['micrormean'], 5), round(dres['micrormedian'],", "-np.inf errr += [np.argsort(np.argsort(-scores_r)).flatten()[r] + 1] return errl, errr def RankingScoreIdx_sub(sl, sr, idxl,", "rightop, subtensorspec=None): embedding, relationl, relationr = parse_embeddings(embeddings) # Inputs idxr, idxo = T.iscalar('idxr'),", "\"\"\" This function returns a Theano function to measure the similarity score of", "rank list of the rhs, over a list of lhs, rhs and rel", "idxl, idxo = T.iscalar('idxl'), T.iscalar('idxo') g = T.matrix('g') # Graph lhs = (embedding.E[:,", "COMPUTING PERFORMANCE METRICS ON RANKINGS # # # Evaluation summary (as in FB15k):", "embeddings, leftop, rightop): embedding, relationl, relationr = parse_embeddings(embeddings) idxl, idxo, idxr = T.iscalar('idxl'),", "RANKINGS # # # Evaluation summary (as in FB15k): # def ranking_summary(res, idxo=None,", "il=np.argwhere(true_triples[:,0]==l).reshape(-1,) 
io=np.argwhere(true_triples[:,1]==o).reshape(-1,) ir=np.argwhere(true_triples[:,2]==r).reshape(-1,) inter_l = [i for i in ir if i in", "We compute the score only for a subset of entities rhs = (embedding.E[:,", "round(dres['macrormedian'], 5), n, round(dres['macrorhits@n'], 3))) logging.info('\\t-- global >> mean: %s, median: %s, hits@%s:", "np.asarray([1 if energy < threshold else 0 for energy in energies]) return classifications", "Inputs idxl, idxo = T.iscalar('idxl'), T.iscalar('idxo') g = T.matrix('g') # Graph lhs =", "+= [np.argsort(np.argsort((sr(l, o)[0]).flatten())[::-1]).flatten()[r] + 1] return errr # # COMPUTING PERFORMANCE METRICS ON", "[], [] relidxs = np.unique(idxo) for relidx in relidxs: dr_domain, dr_range = rel2domain[relidx],", "= np.median(dictrelres[i][0]) dictrelrmedian[i] = np.median(dictrelres[i][1]) dictrelgmedian[i] = np.median(dictrelres[i][0] + dictrelres[i][1]) dictrellrn[i] = np.mean(np.asarray(dictrelres[i][0])", "np.mean(np.asarray(dictrelres[i][0]) <= n) * 100 dictrelrrn[i] = np.mean(np.asarray(dictrelres[i][1]) <= n) * 100 dictrelgrn[i]", "ir if i in io] rmv_idx_l = [true_triples[i,0] for i in inter_l if", "list of lhs, rhs and rel indexes. :param sl: Theano function created with", "for cr in range(len(scores_r)) if cr not in dr_range] scores_l[rmv_idx_l] = -np.inf scores_r[rmv_idx_r]", "theano.function([idxr, idxo], [simi], on_unused_input='ignore') def RankingScoreIdx(sl, sr, idxl, idxr, idxo): \"\"\" This function", "lhs, rhs and rel indexes. :param sl: Theano function created with RankLeftFnIdx(). :param", "valid_idxs = np.where(validoidx == relidx) r_validlidx, r_validridx, r_validoidx = validlidx[valid_idxs], validridx[valid_idxs], validoidx[valid_idxs] r_valid_targets", "rhs = (embedding.E[:, :subtensorspec]).T else: rhs = embedding.E.T # rhs: NxD embedding matrix", "embedding matrix (default None: all entities). \"\"\" embedding, relationl, relationr = parse_embeddings(embeddings) #", "true_triples[i,0] != l] scores_l = (sl(r, o)[0]).flatten() scores_l[rmv_idx_l] = -np.inf errl += [np.argsort(np.argsort(-scores_l)).flatten()[l]", "0. 
for comparison in comparisons] return ret def classify(energies, threshold): classifications = np.asarray([1", "if idxo is not None: listrel = set(idxo) dictrelres = {} dictrellmean =", "= schemaPenalty.schema_penalties_lr_fast(range(l_subtensorspec), [r] * l_subtensorspec, [o] * l_subtensorspec) gr = schemaPenalty.schema_penalties_lr_fast([l] * r_subtensorspec,", "n=10, tag='raw'): resg = res[0] + res[1] dres = {} dres.update({'microlmean': np.mean(res[0])}) dres.update({'microlmedian':", "sparse.learning import parse_embeddings def auc_pr(predictions=[], labels=[]): '''Computes the Area Under the Precision-Recall Curve", "!= r] scores_l = (sl(r, o)[0]).flatten() scores_r = (sr(l, o)[0]).flatten() # Remove triples", "return errl, errr def RankingScoreRightIdx(sr, idxl, idxr, idxo): \"\"\" This function computes the", "(AUC-PR)''' predictions, labels = np.asarray(predictions), np.asarray(labels) precision, recall, threshold = metrics.precision_recall_curve(labels, predictions) auc", "= np.mean(np.asarray(dictrelres[i][0]) <= n) * 100 dictrelrrn[i] = np.mean(np.asarray(dictrelres[i][1]) <= n) * 100", "energyfn(r_testlidx, r_testridx, r_testoidx)[0] test_matches += classification_matches(r_test_energies, r_test_targets, r_valid_cutpoint) logging.info('Validation Accuracy: %s -- Test", "errr += [np.argsort(np.argsort((sr(l, o)[0]).flatten())[::-1]).flatten()[r] + 1] return errl, errr def RankingScoreRightIdx(sr, idxl, idxr,", "= energyfn(r_testlidx, r_testridx, r_testoidx)[0] test_matches += classification_matches(r_test_energies, r_test_targets, r_valid_cutpoint) logging.info('Validation Accuracy: %s --", "leftop, rightop, subtensorspec=None): embedding, relationl, relationr = parse_embeddings(embeddings) # Inputs idxr, idxo =", "\"\"\" return theano.function([idxr, idxo], [simi], on_unused_input='ignore') def RankingScoreIdx(sl, sr, idxl, idxr, idxo): \"\"\"", "the score only for a subset of entities rhs = (embedding.E[:, :subtensorspec]).T else:", "= fnsim(a, b) pen_simi = g[0, :].T * prior.P[idxo, 0].T + g[1, :].T", "illegal_dr_penalty=1e6, true_triples=None): errl, errr = [], [] relidxs = np.unique(idxo) for relidx in", "relationr.D)) tmp = rightop(rhs, relr) simi = fnsim(leftop(lhs, rell), tmp.reshape((1, tmp.shape[1]))) pen_simi =", "np.mean(dictrelrrn.values())}) dres.update({'macrogmean': np.mean(dictrelgmean.values())}) dres.update({'macrogmedian': np.mean(dictrelgmedian.values())}) dres.update({'macroghits@n': np.mean(dictrelgrn.values())}) logging.info('### MACRO (%s):' % (tag)) logging.info('\\t--", "of idxo (relationr) tmp = leftop(lhs, rell) # a = rell(lhs) # b", "to measure the similarity score of all 'left' entities given couples of relation", "theano.function([idxl, idxr, idxo], [energy], on_unused_input='ignore') # # LEVERAGING RANGE AND DOMAIN RELATIONS DURING", "idxo (relationr) tmp = leftop(lhs, rell) # a = rell(lhs) # b =", "find_classification_threshold(r_valid_energies, r_valid_targets) valid_matches += classification_matches(r_valid_energies, r_valid_targets, r_valid_cutpoint) # Select the test triples containing", "+ 1] return errl, errr def FilteredRankingScoreIdx_Schema(sl, sr, idxl, idxr, idxo, true_triples, relation2domainSet,", "list of the rhs, over a list of lhs, rhs and rel indexes.", "embedding, relationl, relationr = parse_embeddings(embeddings) idxl, idxo, idxr = T.iscalar('idxl'), T.iscalar('idxo'), T.iscalar('idxr') lhs", "predictions, labels = np.asarray(predictions), np.asarray(labels) precision, recall, threshold = metrics.roc_curve(labels, 
predictions) auc =", "import metrics from sparse.learning import parse_embeddings def auc_pr(predictions=[], labels=[]): '''Computes the Area Under", "in range(len(scores_l)) if cl not in dr_domain] pen_idx_r = [cr for cr in", "Operating Characteristic Curve (AUC-ROC)''' predictions, labels = np.asarray(predictions), np.asarray(labels) precision, recall, threshold =", "= (relationl.E[:, idxo]).reshape((1, relationl.D)) relr = (relationr.E[:, idxo]).reshape((1, relationr.D)) energy = - fnsim(leftop(lhs,", "errr = [] for l, o, r in zip(idxl, idxo, idxr): errl +=", "logging.info('Validation Accuracy: %s -- Test Accuracy: %s' % ((np.mean(valid_matches) * 100.0), (np.mean(test_matches) *", "the 'right' member. :input idxo: index value of the relation member. Theano function", "rightop(rhs, relr) simi = fnsim(leftop(lhs, rell), tmp.reshape((1, tmp.shape[1]))) pen_simi = g[0, :].T *", "[] errr = [] for l, o, r in zip(idxl, idxo, idxr): gl", "of 'right' indices. :param idxo: list of relation indices. \"\"\" errr = []", "[(idxl[i], idxo[i], idxr[i]) for i in selection]: errl += [np.argsort(np.argsort((sl(r, o)[0]).flatten())[::-1]).flatten()[l] + 1]", ":input idxr: index value of the 'right' member. :input idxo: index value of", "scores_r = (sr(l, o)[0]).flatten() scores_r[rmv_idx_r] = -np.inf errr += [np.argsort(np.argsort(-scores_r)).flatten()[r] + 1] return", "-= illegal_dr_penalty errl += [np.argsort(np.argsort(- scores_l)).flatten()[l] + 1] errr += [np.argsort(np.argsort(- scores_r)).flatten()[r] +", "for i in il if i in io] rmv_idx_r = [true_triples[i,2] for i", "( round(dres['macrogmean'], 5), round(dres['macrogmedian'], 5), n, round(dres['macroghits@n'], 3))) return dres # # RANKING", "idxl, idxr, idxo, relation2domainSet, relation2rangeSet, schemaPenalty, l_subtensorspec=None, r_subtensorspec=None): errl = [] errr =", "if true_triples[i, 0] != l] scores_l = (slro).flatten() scores_l[rmv_idx_l] = -np.inf errl +=", "but works on a subset of examples, defined in the 'selection' parameter. \"\"\"", "to measure the similarity score of all 'right' entities given couples of relation", "(tag)) logging.info('\\t-- left >> mean: %s, median: %s, hits@%s: %s%%' % ( round(dres['macrolmean'],", "io] rmv_idx_l += [true_triples[i, 0] for i in inter_l if true_triples[i, 0] !=", "numpy as np import theano import theano.tensor as T import logging from sklearn", "fnsim: similarity function (on Theano variables). :param embeddings: an Embeddings instance. :param leftop:", "io] rmv_idx_l = [true_triples[i, 0] for i in inter_l if true_triples[i, 0] !=", "i in selection]: errl += [np.argsort(np.argsort((sl(r, o)[0]).flatten())[::-1]).flatten()[l] + 1] errr += [np.argsort(np.argsort((sr(l, o)[0]).flatten())[::-1]).flatten()[r]", "= (relationl.E[:, idxo]).reshape((1, relationl.D)) # rell: 1xD vector containing the embedding of idxo", "np.argwhere(true_triples[:, 2] == r).reshape(-1,) inter_l = [i for i in ir if i", "'left' member. :input idxo: index value of the relation member. Theano function output.", "given couples of relation and 'left' entities (as index values). :param fnsim: similarity", "entities given couples of relation and 'left' entities (as index values). 
:param fnsim:", "idxl].T, embedding.E[:, idxr].T rell, relr = relationl.E[:, idxo].T, relationr.E[:, idxo].T energy = -", "0 for energy in energies]) return classifications # # CLASSIFICATION FUNCTIONS # def", "set(dr_range) test_triples = [(l, o, r) for (l, o, r) in zip(idxl, idxo,", "ranking results if true_triples is not None: il = np.argwhere(true_triples[:, 0] == l).reshape(-1,)", "relationl, relationr = parse_embeddings(embeddings) # Inputs idxr, idxo = T.iscalar('idxr'), T.iscalar('idxo') g =", "= [] for l, o, r in zip(idxl, idxo, idxr): errr += [np.argsort(np.argsort((sr(l,", "= simi - pen_simi return theano.function([idxl, idxo, g], [simi], on_unused_input='ignore') def RankLeftFnIdx_Schema(fnsim, embeddings,", "median: %s, hits@%s: %s%%' % ( round(dres['micrormean'], 5), round(dres['micrormedian'], 5), n, round(dres['microrhits@n'], 3)))", "function (on Theano variables). :param embeddings: an Embeddings instance. :param leftop: class for", "+= [true_triples[i, 2] for i in inter_r if true_triples[i, 2] != r] scores_l", "targets, cutpoint)) * 100.0 for cutpoint in cutpoints] best_cutpoint = cutpoints[np.argmax(np.asarray(accuracies))] return best_cutpoint", "= leftop(lhs, rell) # a = rell(lhs) # b = relr(rhs) # Negative", "relationl, relationr = parse_embeddings(embeddings) # Inputs idxl, idxo = T.iscalar('idxl'), T.iscalar('idxo') g =", "idxo]).reshape((1, relationl.D)) # rell: 1xD vector containing the embedding of idxo (relationl) relr", "+ res[1] dres = {} dres.update({'microlmean': np.mean(res[0])}) dres.update({'microlmedian': np.median(res[0])}) dres.update({'microlhits@n': np.mean(np.asarray(res[0]) <= n)", "metrics.auc(recall, precision) return auc def auc_roc(predictions=[], labels=[]): '''Computes the Area Under the Receiver", "= find_classification_threshold(r_valid_energies, r_valid_targets) valid_matches += classification_matches(r_valid_energies, r_valid_targets, r_valid_cutpoint) # Select the test triples", "5), round(dres['micrormedian'], 5), n, round(dres['microrhits@n'], 3))) logging.info('\\t-- global >> mean: %s, median: %s,", "schemaPenalty, l_subtensorspec=None, r_subtensorspec=None): errl = [] errr = [] for l, o, r", "{} for i in listrel: dictrelres.update({i: [[], []]}) for i, j in enumerate(res[0]):", "dres.update({'macrorhits@n': np.mean(dictrelrrn.values())}) dres.update({'macrogmean': np.mean(dictrelgmean.values())}) dres.update({'macrogmedian': np.mean(dictrelgmedian.values())}) dres.update({'macroghits@n': np.mean(dictrelgrn.values())}) logging.info('### MACRO (%s):' % (tag))", "METRICS ON CLASSIFICATIONS # def classification_summary(energyfn, validlidx, validridx, validoidx, valid_targets, testlidx, testridx, testoidx,", "n, round(dres['macroghits@n'], 3))) return dres # # RANKING FUNCTIONS # def RankRightFnIdx(fnsim, embeddings,", "io] rmv_idx_r = [true_triples[i, 2] for i in inter_r if true_triples[i, 2] !=", "rel2domain, rel2range, illegal_dr_penalty=1e6, true_triples=None): errl, errr = [], [] relidxs = np.unique(idxo) for", "np.median(resg)}) dres.update({'microghits@n': np.mean(np.asarray(resg) <= n) * 100}) logging.info('### MICRO (%s):' % (tag)) logging.info('\\t--", "measure the similarity score for the entities corresponding to the first subtensorspec (int)", "# Inputs idxl, idxo = T.iscalar('idxl'), T.iscalar('idxo') # Graph lhs = (embedding.E[:, idxl]).reshape((1,", "= [] for l, o, r in zip(idxl, idxo, idxr): errl += [np.argsort(np.argsort((sl(r,", "+ 1] errr += [np.argsort(np.argsort(- scores_r)).flatten()[r] + 1] return errl, errr # 
#", "measure the similarity score of all 'left' entities given couples of relation and", "rell(lhs) # b = relr(rhs) # Negative Energy simi = fnsim(tmp.reshape((1, tmp.shape[1])), rightop(rhs,", "slro = sl(r, o, gl)[0] srlo = sr(l, o, gr)[0] inter_l = [i", "dres.update({'macrogmean': np.mean(dictrelgmean.values())}) dres.update({'macrogmedian': np.mean(dictrelgmedian.values())}) dres.update({'macroghits@n': np.mean(dictrelgrn.values())}) logging.info('### MACRO (%s):' % (tag)) logging.info('\\t-- left", "of examples, defined in the 'selection' parameter. \"\"\" errl, errr = [], []", "= res[0] + res[1] dres.update({'microgmean': np.mean(resg)}) dres.update({'microgmedian': np.median(resg)}) dres.update({'microghits@n': np.mean(np.asarray(resg) <= n) *", "relationl.D)) relr = (relationr.E[:, idxo]).reshape((1, relationr.D)) tmp = rightop(rhs, relr) simi = fnsim(leftop(lhs,", "indexes relidxs = np.unique(validoidx) valid_matches, test_matches = [], [] # Iterate over unique", "energyfn(r_validlidx, r_validridx, r_validoidx)[0] r_valid_cutpoint = find_classification_threshold(r_valid_energies, r_valid_targets) valid_matches += classification_matches(r_valid_energies, r_valid_targets, r_valid_cutpoint) #", ":param leftop: class for the 'left' operator. :param rightop: class for the 'right'", "containing the embedding of idxo (relationl) relr = (relationr.E[:, idxo]).reshape((1, relationr.D)) # relr:", "= T.matrix('g') # Graph if subtensorspec is not None: # We compute the", "list of 'right' indices. :param idxo: list of relation indices. \"\"\" errl =", "for l, o, r in zip(idxl, idxo, idxr): il=np.argwhere(true_triples[:,0]==l).reshape(-1,) io=np.argwhere(true_triples[:,1]==o).reshape(-1,) ir=np.argwhere(true_triples[:,2]==r).reshape(-1,) inter_l =", "cr in range(len(scores_r)) if cr not in dr_range] scores_l[rmv_idx_l] = -np.inf scores_r[rmv_idx_r] =", "We compute the score only for a subset of entities lhs = (embedding.E[:,", "Curve (AUC-PR)''' predictions, labels = np.asarray(predictions), np.asarray(labels) precision, recall, threshold = metrics.precision_recall_curve(labels, predictions)", "sl(r, o, gl)[0] srlo = sr(l, o, gr)[0] inter_l = [i for i", "the 'left' operator. :param rightop: class for the 'right' operator. :param subtensorspec: only", "as np import theano import theano.tensor as T import logging from sklearn import", "PERFORMANCE METRICS ON RANKINGS # # # Evaluation summary (as in FB15k): #", "idxo], [energy], on_unused_input='ignore') # # LEVERAGING RANGE AND DOMAIN RELATIONS DURING LEARNING #", "# a = rell(lhs) # b = relr(rhs) # Negative Energy simi =", "= cutpoints[np.argmax(np.asarray(accuracies))] return best_cutpoint def classification_matches(energies, targets, threshold): classifications = classify(energies, threshold) comparisons", "[energy], on_unused_input='ignore') # # LEVERAGING RANGE AND DOMAIN RELATIONS DURING LEARNING # def", "classify(energies, threshold) comparisons = (targets == classifications) ret = [1. 
if comparison ==", "and range from ranking results pen_idx_l = [cl for cl in range(len(scores_l)) if", "containing the 'relidx' predicate, and the corresponding target values test_idxs = np.where(testoidx ==", "np.mean(dictrelres[i][0]) dictrelrmean[i] = np.mean(dictrelres[i][1]) dictrelgmean[i] = np.mean(dictrelres[i][0] + dictrelres[i][1]) dictrellmedian[i] = np.median(dictrelres[i][0]) dictrelrmedian[i]", "true_triples[i, 2] != r] scores_r = (srlo).flatten() scores_r[rmv_idx_r] = -np.inf errr += [np.argsort(np.argsort(-scores_r)).flatten()[r]", "T.ivector('idxr') lhs, rhs = embedding.E[:, idxl].T, embedding.E[:, idxr].T rell, relr = relationl.E[:, idxo].T,", "= [cl for cl in range(len(scores_l)) if cl not in dr_domain] pen_idx_r =", "tmp.reshape((1, tmp.shape[1]))) \"\"\" Theano function inputs. :input idxr: index value of the 'right'", "from sparse.learning import parse_embeddings def auc_pr(predictions=[], labels=[]): '''Computes the Area Under the Precision-Recall", "selection]: errl += [np.argsort(np.argsort((sl(r, o)[0]).flatten())[::-1]).flatten()[l] + 1] errr += [np.argsort(np.argsort((sr(l, o)[0]).flatten())[::-1]).flatten()[r] + 1]", "if true_triples[i, 2] != r] scores_l = (sl(r, o)[0]).flatten() scores_r = (sr(l, o)[0]).flatten()", "T.iscalar('idxo') # Graph lhs = (embedding.E[:, idxl]).reshape((1, embedding.D)) # lhs: 1xD vector containing", "relidx in relidxs: # Select the validation triples containing the 'relidx' predicate, and", "100}) logging.info('### MICRO (%s):' % (tag)) logging.info('\\t-- left >> mean: %s, median: %s,", "r] scores_l = (sl(r, o)[0]).flatten() scores_r = (sr(l, o)[0]).flatten() # Remove triples not", "dictrellmedian = {} dictrelrmedian = {} dictrelgmedian = {} dictrellrn = {} dictrelrrn", "# simi = fnsim(a, b) pen_simi = g[0, :].T * prior.P[idxo, 0].T +", "idxo]).reshape((1, relationl.D)) relr = (relationr.E[:, idxo]).reshape((1, relationr.D)) energy = - fnsim(leftop(lhs, rell), rightop(rhs,", "auc = metrics.auc(recall, precision) return auc def auc_roc(predictions=[], labels=[]): '''Computes the Area Under", "idxl, idxo = T.iscalar('idxl'), T.iscalar('idxo') # Graph lhs = (embedding.E[:, idxl]).reshape((1, embedding.D)) #", "np.argwhere(true_triples[:, 1] == o).reshape(-1,) ir = np.argwhere(true_triples[:, 2] == r).reshape(-1,) inter_l = [i", "[np.argsort(np.argsort((sl(r, o)[0]).flatten())[::-1]).flatten()[l] + 1] errr += [np.argsort(np.argsort((sr(l, o)[0]).flatten())[::-1]).flatten()[r] + 1] return errl, errr", "of idxo (relationl) relr = (relationr.E[:, idxo]).reshape((1, relationr.D)) # relr: 1xD vector containing", "errr = [], [] for l, o, r in [(idxl[i], idxo[i], idxr[i]) for", "{} dictrelrmean = {} dictrelgmean = {} dictrellmedian = {} dictrelrmedian = {}", "in inter_l if true_triples[i, 0] != l] scores_l = (slro).flatten() scores_l[rmv_idx_l] = -np.inf", "idxo, idxr = T.ivector('idxl'), T.ivector('idxo'), T.ivector('idxr') lhs, rhs = embedding.E[:, idxl].T, embedding.E[:, idxr].T", "= fnsim(leftop(lhs, rell), tmp.reshape((1, tmp.shape[1]))) \"\"\" Theano function inputs. 
:input idxr: index value", "T.iscalar('idxr') lhs = (embedding.E[:, idxl]).reshape((1, embedding.D)) rhs = (embedding.E[:, idxr]).reshape((1, embedding.D)) rell =", "domain and range from ranking results pen_idx_l = [cl for cl in range(len(scores_l))", "\"\"\" embedding, relationl, relationr = parse_embeddings(embeddings) # Inputs idxl, idxo = T.iscalar('idxl'), T.iscalar('idxo')", "idxo]).reshape((1, relationr.D)) energy = - fnsim(leftop(lhs, rell), rightop(rhs, relr)) return theano.function([idxl, idxr, idxo],", "(l, o, r) in zip(idxl, idxo, idxr) if o == relidx] for l,", "round(dres['microgmedian'], 5), n, round(dres['microghits@n'], 3))) if idxo is not None: listrel = set(idxo)", "T.iscalar('idxl'), T.iscalar('idxo') g = T.matrix('g') # Graph lhs = (embedding.E[:, idxl]).reshape((1, embedding.D)) #", "compute the score only for a subset of entities lhs = (embedding.E[:, :subtensorspec]).T", "idxo, idxr): errl += [np.argsort(np.argsort((sl(r, o)[0]).flatten())[::-1]).flatten()[l] + 1] errr += [np.argsort(np.argsort((sr(l, o)[0]).flatten())[::-1]).flatten()[r] +", "score of all 'left' entities given couples of relation and 'right' entities (as", "= np.unique(validoidx) valid_matches, test_matches = [], [] # Iterate over unique relation indexes", "test_triples = [(l, o, r) for (l, o, r) in zip(idxl, idxo, idxr)", "o, r in test_triples: rmv_idx_l, rmv_idx_r = [], [] # Remove triples from", "return theano.function([idxr, idxo, g], [simi], on_unused_input='ignore') #@profile def RankingScoreIdx_Schema(sl, sr, idxl, idxr, idxo,", "entities corresponding to the first subtensorspec (int) entities of the embedding matrix (default", "r_validridx, r_validoidx)[0] r_valid_cutpoint = find_classification_threshold(r_valid_energies, r_valid_targets) valid_matches += classification_matches(r_valid_energies, r_valid_targets, r_valid_cutpoint) # Select", "= [], [] relidxs = np.unique(idxo) for relidx in relidxs: dr_domain, dr_range =", "res[0] + res[1] dres.update({'microgmean': np.mean(resg)}) dres.update({'microgmedian': np.median(resg)}) dres.update({'microghits@n': np.mean(np.asarray(resg) <= n) * 100})", "srlo = sr(l, o, gr)[0] errl += [np.argsort(np.argsort((slro).flatten())[::-1]).flatten()[l] + 1] errr += [np.argsort(np.argsort((srlo).flatten())[::-1]).flatten()[r]", "scores_l[rmv_idx_l] = -np.inf scores_r[rmv_idx_r] = -np.inf scores_l[pen_idx_l] -= illegal_dr_penalty scores_r[pen_idx_r] -= illegal_dr_penalty errl", "= (sl(r, o)[0]).flatten() scores_r = (sr(l, o)[0]).flatten() # Remove triples not in domain", "of relation and 'left' entities (as index values). :param fnsim: similarity function (on", "rightop): embedding, relationl, relationr = parse_embeddings(embeddings) idxl, idxo, idxr = T.ivector('idxl'), T.ivector('idxo'), T.ivector('idxr')", "prior.P[idxo, 0].T + g[1, :].T * prior.P[idxo, 1].T simi = simi - pen_simi", "defined in the 'selection' parameter. \"\"\" errl, errr = [], [] for l,", "def RankRightFnIdx_Schema(fnsim, embeddings, prior, leftop, rightop, subtensorspec=None): embedding, relationl, relationr = parse_embeddings(embeddings) #", "value of the relation member. Theano function output. 
:output simi: vector of score", "rmv_idx_l, rmv_idx_r = [], [] # Remove triples from true_triples from ranking results", "in domain and range from ranking results pen_idx_l = [cl for cl in", "embedding.E.T # rhs: NxD embedding matrix rell = (relationl.E[:, idxo]).reshape((1, relationl.D)) # rell:", "l, o, r in zip(idxl, idxo, idxr): gl = schemaPenalty.schema_penalties_lr_fast(range(l_subtensorspec), [r] * l_subtensorspec,", "= set(dr_range) test_triples = [(l, o, r) for (l, o, r) in zip(idxl,", "embedding, relationl, relationr = parse_embeddings(embeddings) # Inputs idxr, idxo = T.iscalar('idxr'), T.iscalar('idxo') g", ":input idxl: index value of the 'left' member. :input idxo: index value of", "relationl.E[:, idxo].T, relationr.E[:, idxo].T energy = - fnsim(leftop(lhs, rell), rightop(rhs, relr)) return theano.function([idxl,", "sklearn import metrics from sparse.learning import parse_embeddings def auc_pr(predictions=[], labels=[]): '''Computes the Area", "corresponding to the first subtensorspec (int) entities of the embedding matrix (default None:", "test_targets[test_idxs] r_test_energies = energyfn(r_testlidx, r_testridx, r_testoidx)[0] test_matches += classification_matches(r_test_energies, r_test_targets, r_valid_cutpoint) logging.info('Validation Accuracy:", "relationl, relationr = parse_embeddings(embeddings) idxl, idxo, idxr = T.iscalar('idxl'), T.iscalar('idxo'), T.iscalar('idxr') lhs =", "j in enumerate(res[0]): dictrelres[idxo[i]][0] += [j] for i, j in enumerate(res[1]): dictrelres[idxo[i]][1] +=", "(default None: all entities). \"\"\" embedding, relationl, relationr = parse_embeddings(embeddings) # Inputs idxl,", "\"\"\" Theano function inputs. :input idxl: index value of the 'left' member. :input", "lhs and rhs, over a list of lhs, rhs and rel indexes. :param", "g[1, :].T * prior.P[idxo, 1].T simi = simi - pen_simi return theano.function([idxr, idxo,", "subtensorspec is not None: # We compute the score only for a subset", "Graph if subtensorspec is not None: # We compute the score only for", "indexes. :param sl: Theano function created with RankLeftFnIdx(). :param sr: Theano function created", "testoidx, test_targets): # Find unique relation indexes relidxs = np.unique(validoidx) valid_matches, test_matches =", "'selection' parameter. \"\"\" errl, errr = [], [] for l, o, r in", "il = np.argwhere(true_triples[:, 0] == l).reshape(-1,) io = np.argwhere(true_triples[:, 1] == o).reshape(-1,) ir", "= np.argwhere(true_triples[:, 1] == o).reshape(-1,) ir = np.argwhere(true_triples[:, 2] == r).reshape(-1,) inter_l =", "simi - pen_simi return theano.function([idxl, idxo, g], [simi], on_unused_input='ignore') def RankLeftFnIdx_Schema(fnsim, embeddings, prior,", "predicate, and the corresponding target values valid_idxs = np.where(validoidx == relidx) r_validlidx, r_validridx,", "energies]) return classifications # # CLASSIFICATION FUNCTIONS # def EnergyFn(fnsim, embeddings, leftop, rightop):", "100.0), (np.mean(test_matches) * 100.0))) def find_classification_threshold(energies, targets): x = np.unique(np.sort(energies)) cutpoints = np.concatenate(([x[0]],", "predictions) auc = metrics.auc(recall, precision) return auc # # COMPUTING PERFORMANCE METRICS ON", "{} dictrelrmedian = {} dictrelgmedian = {} dictrellrn = {} dictrelrrn = {}", "illegal_dr_penalty errl += [np.argsort(np.argsort(- scores_l)).flatten()[l] + 1] errr += [np.argsort(np.argsort(- scores_r)).flatten()[r] + 1]", "'left' indices. :param idxr: list of 'right' indices. 
:param idxo: list of relation", "l] scores_l = (slro).flatten() scores_l[rmv_idx_l] = -np.inf errl += [np.argsort(np.argsort(-scores_l)).flatten()[l] + 1] inter_r", "test_idxs = np.where(testoidx == relidx) r_testlidx, r_testridx, r_testoidx = testlidx[test_idxs], testridx[test_idxs], testoidx[test_idxs] r_test_targets", "+ dictrelres[i][1]) dictrellrn[i] = np.mean(np.asarray(dictrelres[i][0]) <= n) * 100 dictrelrrn[i] = np.mean(np.asarray(dictrelres[i][1]) <=", "dictrellrn = {} dictrelrrn = {} dictrelgrn = {} for i in listrel:", "the corresponding target values test_idxs = np.where(testoidx == relidx) r_testlidx, r_testridx, r_testoidx =", "on_unused_input='ignore') def RankLeftFnIdx(fnsim, embeddings, leftop, rightop, subtensorspec=None): \"\"\" This function returns a Theano", "0] for i in inter_l if true_triples[i, 0] != l] inter_r = [i", "sr, idxl, idxr, idxo): \"\"\" This function computes the rank list of the", "zip(idxl, idxo, idxr): il=np.argwhere(true_triples[:,0]==l).reshape(-1,) io=np.argwhere(true_triples[:,1]==o).reshape(-1,) ir=np.argwhere(true_triples[:,2]==r).reshape(-1,) inter_l = [i for i in ir", "vector of score values. \"\"\" return theano.function([idxl, idxo], [simi], on_unused_input='ignore') def RankLeftFnIdx(fnsim, embeddings,", "parse_embeddings(embeddings) # Inputs idxl, idxo = T.iscalar('idxl'), T.iscalar('idxo') # Graph lhs = (embedding.E[:,", "cutpoints[np.argmax(np.asarray(accuracies))] return best_cutpoint def classification_matches(energies, targets, threshold): classifications = classify(energies, threshold) comparisons =", "relr = relationl.E[:, idxo].T, relationr.E[:, idxo].T energy = - fnsim(leftop(lhs, rell), rightop(rhs, relr))", "+ 1] return errl, errr # # SCHEMA-AWARE RANKING FUNCTIONS # # #", "the test triples containing the 'relidx' predicate, and the corresponding target values test_idxs", "Theano function to measure the similarity score of all 'right' entities given couples", "= T.iscalar('idxl'), T.iscalar('idxo') g = T.matrix('g') # Graph lhs = (embedding.E[:, idxl]).reshape((1, embedding.D))", "n) * 100 dictrelrrn[i] = np.mean(np.asarray(dictrelres[i][1]) <= n) * 100 dictrelgrn[i] = np.mean(np.asarray(dictrelres[i][0]", "r in zip(idxl, idxo, idxr): gl = schemaPenalty.schema_penalties_lr_fast(range(l_subtensorspec), [r] * l_subtensorspec, [o] *", "tag='raw'): resg = res[0] + res[1] dres = {} dres.update({'microlmean': np.mean(res[0])}) dres.update({'microlmedian': np.median(res[0])})", "Evaluate the energies of those triples r_valid_energies = energyfn(r_validlidx, r_validridx, r_validoidx)[0] r_valid_cutpoint =", "else 0 for energy in energies]) return classifications # # CLASSIFICATION FUNCTIONS #", "a subset of examples, defined in the 'selection' parameter. \"\"\" errl, errr =", "dictrelres = {} dictrellmean = {} dictrelrmean = {} dictrelgmean = {} dictrellmedian", "variables). :param embeddings: an Embeddings instance. :param leftop: class for the 'left' operator.", "idxr): errl += [np.argsort(np.argsort((sl(r, o)[0]).flatten())[::-1]).flatten()[l] + 1] errr += [np.argsort(np.argsort((sr(l, o)[0]).flatten())[::-1]).flatten()[r] + 1]", "= -np.inf scores_r[rmv_idx_r] = -np.inf scores_l[pen_idx_l] -= illegal_dr_penalty scores_r[pen_idx_r] -= illegal_dr_penalty errl +=", "a subset of entities rhs = (embedding.E[:, :subtensorspec]).T else: rhs = embedding.E.T #", "i in inter_l if true_triples[i,0] != l] scores_l = (sl(r, o)[0]).flatten() scores_l[rmv_idx_l] =", "indexes. :param sr: Theano function created with RankRightFnIdx(). 
:param idxl: list of 'left'", "return auc # # COMPUTING PERFORMANCE METRICS ON RANKINGS # # # Evaluation", "Remove triples not in domain and range from ranking results pen_idx_l = [cl", "best_cutpoint = cutpoints[np.argmax(np.asarray(accuracies))] return best_cutpoint def classification_matches(energies, targets, threshold): classifications = classify(energies, threshold)", "in range(len(scores_r)) if cr not in dr_range] scores_l[rmv_idx_l] = -np.inf scores_r[rmv_idx_r] = -np.inf", "in zip(idxl, idxo, idxr): gl = schemaPenalty.schema_penalties_lr_fast(range(l_subtensorspec), [r] * l_subtensorspec, [o] * l_subtensorspec)", "def FilteredRankingScoreIdx(sl, sr, idxl, idxr, idxo, true_triples): \"\"\" This function computes the rank", "true_triples[i, 0] != l] inter_r = [i for i in il if i", "logging.info('\\t-- left >> mean: %s, median: %s, hits@%s: %s%%' % ( round(dres['microlmean'], 5),", "%s%%' % ( round(dres['macrogmean'], 5), round(dres['macrogmedian'], 5), n, round(dres['macroghits@n'], 3))) return dres #", "np.median(res[1])}) dres.update({'microrhits@n': np.mean(np.asarray(res[1]) <= n) * 100}) resg = res[0] + res[1] dres.update({'microgmean':", "in the 'selection' parameter. \"\"\" errl, errr = [], [] for l, o,", "5), n, round(dres['macroghits@n'], 3))) return dres # # RANKING FUNCTIONS # def RankRightFnIdx(fnsim,", "np.concatenate(([x[0]], (x[1:] + x[:-1]) / 2., [x[-1]])) accuracies = [np.mean(classification_matches(energies, targets, cutpoint)) *", "from ranking results pen_idx_l = [cl for cl in range(len(scores_l)) if cl not", "def ranking_summary(res, idxo=None, n=10, tag='raw'): resg = res[0] + res[1] dres = {}", "lhs = (embedding.E[:, :subtensorspec]).T else: lhs = embedding.E.T rhs = (embedding.E[:, idxr]).reshape((1, embedding.D))", "l] scores_l = (sl(r, o)[0]).flatten() scores_l[rmv_idx_l] = -np.inf errl += [np.argsort(np.argsort(-scores_l)).flatten()[l] + 1]", "embedding.D)) # lhs: 1xD vector containing the embedding of idxl if subtensorspec is", "threshold = metrics.precision_recall_curve(labels, predictions) auc = metrics.auc(recall, precision) return auc def auc_roc(predictions=[], labels=[]):", "best_cutpoint def classification_matches(energies, targets, threshold): classifications = classify(energies, threshold) comparisons = (targets ==", "Negative Energy simi = fnsim(tmp.reshape((1, tmp.shape[1])), rightop(rhs, relr)) # simi = fnsim(a, b)", "o)[0]).flatten() scores_l[rmv_idx_l] = -np.inf errl += [np.argsort(np.argsort(-scores_l)).flatten()[l] + 1] inter_r = [i for", "unique relation indexes for relidx in relidxs: # Select the validation triples containing", "= np.where(testoidx == relidx) r_testlidx, r_testridx, r_testoidx = testlidx[test_idxs], testridx[test_idxs], testoidx[test_idxs] r_test_targets =", "\"\"\" This function computes the rank list of the lhs and rhs, over", "utf-8 -*- import numpy as np import theano import theano.tensor as T import", "relationr.D)) tmp = rightop(rhs, relr) simi = fnsim(leftop(lhs, rell), tmp.reshape((1, tmp.shape[1]))) \"\"\" Theano", "over a list of lhs, rhs and rel indexes. 
:param sr: Theano function", "(relationl.E[:, idxo]).reshape((1, relationl.D)) relr = (relationr.E[:, idxo]).reshape((1, relationr.D)) energy = - fnsim(leftop(lhs, rell),", "1] return errl, errr # # SCHEMA-AWARE RANKING FUNCTIONS # # # RANKING", "relr)) # simi = fnsim(a, b) pen_simi = g[0, :].T * prior.P[idxo, 0].T", "r_valid_targets, r_valid_cutpoint) # Select the test triples containing the 'relidx' predicate, and the", "function returns a Theano function to measure the similarity score of all 'right'", "= np.mean(dictrelres[i][0]) dictrelrmean[i] = np.mean(dictrelres[i][1]) dictrelgmean[i] = np.mean(dictrelres[i][0] + dictrelres[i][1]) dictrellmedian[i] = np.median(dictrelres[i][0])", "= (embedding.E[:, :subtensorspec]).T else: rhs = embedding.E.T # rhs: NxD embedding matrix rell", "(relationr.E[:, idxo]).reshape((1, relationr.D)) # relr: 1xD vector containing the embedding of idxo (relationr)", "values valid_idxs = np.where(validoidx == relidx) r_validlidx, r_validridx, r_validoidx = validlidx[valid_idxs], validridx[valid_idxs], validoidx[valid_idxs]", "# def classification_summary(energyfn, validlidx, validridx, validoidx, valid_targets, testlidx, testridx, testoidx, test_targets): # Find", "over unique relation indexes for relidx in relidxs: # Select the validation triples", "r_testlidx, r_testridx, r_testoidx = testlidx[test_idxs], testridx[test_idxs], testoidx[test_idxs] r_test_targets = test_targets[test_idxs] r_test_energies = energyfn(r_testlidx,", "true_triples=None): errl, errr = [], [] relidxs = np.unique(idxo) for relidx in relidxs:", "%s, hits@%s: %s%%' % ( round(dres['macrormean'], 5), round(dres['macrormedian'], 5), n, round(dres['macrorhits@n'], 3))) logging.info('\\t--", "= (sl(r, o)[0]).flatten() scores_l[rmv_idx_l] = -np.inf errl += [np.argsort(np.argsort(-scores_l)).flatten()[l] + 1] inter_r =", "median: %s, hits@%s: %s%%' % ( round(dres['macrolmean'], 5), round(dres['macrolmedian'], 5), n, round(dres['macrolhits@n'], 3)))", "not None: il = np.argwhere(true_triples[:, 0] == l).reshape(-1,) io = np.argwhere(true_triples[:, 1] ==", "# rell: 1xD vector containing the embedding of idxo (relationl) relr = (relationr.E[:,", "list of 'right' indices. :param idxo: list of relation indices. \"\"\" errr =", "global >> mean: %s, median: %s, hits@%s: %s%%' % ( round(dres['macrogmean'], 5), round(dres['macrogmedian'],", "the 'relidx' predicate, and the corresponding target values test_idxs = np.where(testoidx == relidx)", "threshold) comparisons = (targets == classifications) ret = [1. if comparison == True", "comparison == True else 0. for comparison in comparisons] return ret def classify(energies,", "<= n) * 100 dres.update({'dictrelres': dictrelres}) dres.update({'dictrellmean': dictrellmean}) dres.update({'dictrelrmean': dictrelrmean}) dres.update({'dictrelgmean': dictrelgmean}) dres.update({'dictrellmedian':", "theano.function([idxr, idxo, g], [simi], on_unused_input='ignore') #@profile def RankingScoreIdx_Schema(sl, sr, idxl, idxr, idxo, relation2domainSet,", "= (embedding.E[:, :subtensorspec]).T else: lhs = embedding.E.T rhs = (embedding.E[:, idxr]).reshape((1, embedding.D)) rell", "o)[0]).flatten())[::-1]).flatten()[r] + 1] return errl, errr def RankingScoreRightIdx(sr, idxl, idxr, idxo): \"\"\" This", "{} dictrelgmean = {} dictrellmedian = {} dictrelrmedian = {} dictrelgmedian = {}", "True else 0. 
for comparison in comparisons] return ret def classify(energies, threshold): classifications", "labels = np.asarray(predictions), np.asarray(labels) precision, recall, threshold = metrics.roc_curve(labels, predictions) auc = metrics.auc(recall,", "np.mean(np.asarray(dictrelres[i][0] + dictrelres[i][1]) <= n) * 100 dres.update({'dictrelres': dictrelres}) dres.update({'dictrellmean': dictrellmean}) dres.update({'dictrelrmean': dictrelrmean})", "== relidx) r_testlidx, r_testridx, r_testoidx = testlidx[test_idxs], testridx[test_idxs], testoidx[test_idxs] r_test_targets = test_targets[test_idxs] r_test_energies", "sl: Theano function created with RankLeftFnIdx(). :param sr: Theano function created with RankRightFnIdx().", "-np.inf errl += [np.argsort(np.argsort(-scores_l)).flatten()[l] + 1] inter_r = [i for i in il", "* 100 dictrelrrn[i] = np.mean(np.asarray(dictrelres[i][1]) <= n) * 100 dictrelgrn[i] = np.mean(np.asarray(dictrelres[i][0] +", "i in io] rmv_idx_r = [true_triples[i,2] for i in inter_r if true_triples[i,2] !=", "'right' indices. :param idxo: list of relation indices. \"\"\" errr = [] for", "for l, o, r in zip(idxl, idxo, idxr): il=np.argwhere(true_triples[:,0]==l).reshape(-1,) io=np.argwhere(true_triples[:,1]==o).reshape(-1,) ir=np.argwhere(true_triples[:,2]==r).reshape(-1,) gl =", "0].T + g[1, :].T * prior.P[idxo, 1].T simi = simi - pen_simi return", "# We compute the score only for a subset of entities lhs =", "(int) entities of the embedding matrix (default None: all entities). \"\"\" embedding, relationl,", "subset of entities lhs = (embedding.E[:, :subtensorspec]).T else: lhs = embedding.E.T rhs =", "\"\"\" Similar to RankingScoreIdx, but works on a subset of examples, defined in", "Select the test triples containing the 'relidx' predicate, and the corresponding target values", "dres.update({'dictrellmean': dictrellmean}) dres.update({'dictrelrmean': dictrelrmean}) dres.update({'dictrelgmean': dictrelgmean}) dres.update({'dictrellmedian': dictrellmedian}) dres.update({'dictrelrmedian': dictrelrmedian}) dres.update({'dictrelgmedian': dictrelgmedian}) dres.update({'dictrellrn':", "member. Theano function output. :output simi: vector of score values. \"\"\" return theano.function([idxr,", "= np.median(dictrelres[i][0] + dictrelres[i][1]) dictrellrn[i] = np.mean(np.asarray(dictrelres[i][0]) <= n) * 100 dictrelrrn[i] =", "for i, j in enumerate(res[1]): dictrelres[idxo[i]][1] += [j] for i in listrel: dictrellmean[i]", "%s%%' % ( round(dres['microlmean'], 5), round(dres['microlmedian'], 5), n, round(dres['microlhits@n'], 3))) logging.info('\\t-- right >>", "il if i in io] rmv_idx_r = [true_triples[i,2] for i in inter_r if", "(on Theano variables). :param embeddings: an Embeddings instance. :param leftop: class for the", "( round(dres['macrormean'], 5), round(dres['macrormedian'], 5), n, round(dres['macrorhits@n'], 3))) logging.info('\\t-- global >> mean: %s,", ":subtensorspec]).T else: lhs = embedding.E.T rhs = (embedding.E[:, idxr]).reshape((1, embedding.D)) rell = (relationl.E[:,", "1] errr += [np.argsort(np.argsort((sr(l, o)[0]).flatten())[::-1]).flatten()[r] + 1] return errl, errr def FilteredRankingScoreIdx(sl, sr,", "1] errr += [np.argsort(np.argsort((srlo).flatten())[::-1]).flatten()[r] + 1] return errl, errr def FilteredRankingScoreIdx_Schema(sl, sr, idxl,", "rhs, over a list of lhs, rhs and rel indexes. :param sr: Theano", "for i in ir if i in io] rmv_idx_l = [true_triples[i, 0] for", "b) \"\"\" Theano function inputs. 
:input idxl: index value of the 'left' member.", "in comparisons] return ret def classify(energies, threshold): classifications = np.asarray([1 if energy <", "inter_l if true_triples[i, 0] != l] scores_l = (slro).flatten() scores_l[rmv_idx_l] = -np.inf errl", "if i in io] rmv_idx_r += [true_triples[i, 2] for i in inter_r if", "idxo], [simi], on_unused_input='ignore') def RankingScoreIdx(sl, sr, idxl, idxr, idxo): \"\"\" This function computes", "rell), tmp.reshape((1, tmp.shape[1]))) pen_simi = g[0, :].T * prior.P[idxo, 0].T + g[1, :].T", "= (slro).flatten() scores_l[rmv_idx_l] = -np.inf errl += [np.argsort(np.argsort(-scores_l)).flatten()[l] + 1] inter_r = [i", "n) * 100 dictrelgrn[i] = np.mean(np.asarray(dictrelres[i][0] + dictrelres[i][1]) <= n) * 100 dres.update({'dictrelres':", "errl += [np.argsort(np.argsort(- scores_l)).flatten()[l] + 1] errr += [np.argsort(np.argsort(- scores_r)).flatten()[r] + 1] return", "of entities lhs = (embedding.E[:, :subtensorspec]).T else: lhs = embedding.E.T rhs = (embedding.E[:,", "FilteredRankingScoreIdx_DR(sl, sr, idxl, idxr, idxo, rel2domain, rel2range, illegal_dr_penalty=1e6, true_triples=None): errl, errr = [],", "%s%%' % ( round(dres['macrolmean'], 5), round(dres['macrolmedian'], 5), n, round(dres['macrolhits@n'], 3))) logging.info('\\t-- right >>", "resg = res[0] + res[1] dres = {} dres.update({'microlmean': np.mean(res[0])}) dres.update({'microlmedian': np.median(res[0])}) dres.update({'microlhits@n':", "metrics.precision_recall_curve(labels, predictions) auc = metrics.auc(recall, precision) return auc def auc_roc(predictions=[], labels=[]): '''Computes the", "dictrelres[i][1]) dictrellrn[i] = np.mean(np.asarray(dictrelres[i][0]) <= n) * 100 dictrelrrn[i] = np.mean(np.asarray(dictrelres[i][1]) <= n)", "the Precision-Recall Curve (AUC-PR)''' predictions, labels = np.asarray(predictions), np.asarray(labels) precision, recall, threshold =", "+= classification_matches(r_valid_energies, r_valid_targets, r_valid_cutpoint) # Select the test triples containing the 'relidx' predicate,", "= set(dr_domain) dr_range = set(dr_range) test_triples = [(l, o, r) for (l, o,", "in ir if i in io] rmv_idx_l = [true_triples[i, 0] for i in", "testlidx[test_idxs], testridx[test_idxs], testoidx[test_idxs] r_test_targets = test_targets[test_idxs] r_test_energies = energyfn(r_testlidx, r_testridx, r_testoidx)[0] test_matches +=", "function created with RankLeftFnIdx(). :param sr: Theano function created with RankRightFnIdx(). 
:param idxl:", "parse_embeddings(embeddings) # Inputs idxr, idxo = T.iscalar('idxr'), T.iscalar('idxo') # Graph if subtensorspec is", "idxr, idxo = T.iscalar('idxr'), T.iscalar('idxo') # Graph if subtensorspec is not None: #", "parse_embeddings(embeddings) idxl, idxo, idxr = T.iscalar('idxl'), T.iscalar('idxo'), T.iscalar('idxr') lhs = (embedding.E[:, idxl]).reshape((1, embedding.D))", "inter_r = [i for i in il if i in io] rmv_idx_r =", "idxr): il=np.argwhere(true_triples[:,0]==l).reshape(-1,) io=np.argwhere(true_triples[:,1]==o).reshape(-1,) ir=np.argwhere(true_triples[:,2]==r).reshape(-1,) gl = schemaPenalty.schema_penalties_lr_fast(range(l_subtensorspec), [r] * l_subtensorspec, [o] * l_subtensorspec)", "embedding.D)) rell = (relationl.E[:, idxo]).reshape((1, relationl.D)) relr = (relationr.E[:, idxo]).reshape((1, relationr.D)) tmp =", "function to measure the similarity score of all 'right' entities given couples of", "for i in il if i in io] rmv_idx_r += [true_triples[i, 2] for", "idxo].T energy = - fnsim(leftop(lhs, rell), rightop(rhs, relr)) return theano.function([idxl, idxr, idxo], [energy],", "% ( round(dres['microlmean'], 5), round(dres['microlmedian'], 5), n, round(dres['microlhits@n'], 3))) logging.info('\\t-- right >> mean:", "of the lhs and rhs, over a list of lhs, rhs and rel", "sr: Theano function created with RankRightFnIdx(). :param idxl: list of 'left' indices. :param", "classification_summary(energyfn, validlidx, validridx, validoidx, valid_targets, testlidx, testridx, testoidx, test_targets): # Find unique relation", "2] != r] scores_r = (srlo).flatten() scores_r[rmv_idx_r] = -np.inf errr += [np.argsort(np.argsort(-scores_r)).flatten()[r] +", "T.ivector('idxo'), T.ivector('idxr') lhs, rhs = embedding.E[:, idxl].T, embedding.E[:, idxr].T rell, relr = relationl.E[:,", "dictrelgrn = {} for i in listrel: dictrelres.update({i: [[], []]}) for i, j", "for i in listrel: dictrellmean[i] = np.mean(dictrelres[i][0]) dictrelrmean[i] = np.mean(dictrelres[i][1]) dictrelgmean[i] = np.mean(dictrelres[i][0]", "r).reshape(-1,) inter_l = [i for i in ir if i in io] rmv_idx_l", "the rank list of the lhs and rhs, over a list of lhs,", "= embedding.E[:, idxl].T, embedding.E[:, idxr].T rell, relr = relationl.E[:, idxo].T, relationr.E[:, idxo].T energy", "range(r_subtensorspec), [o] * r_subtensorspec) slro = sl(r, o, gl)[0] srlo = sr(l, o,", "= sr(l, o, gr)[0] inter_l = [i for i in ir if i", "[i for i in il if i in io] rmv_idx_r = [true_triples[i,2] for", "= np.concatenate(([x[0]], (x[1:] + x[:-1]) / 2., [x[-1]])) accuracies = [np.mean(classification_matches(energies, targets, cutpoint))", "NxD embedding matrix rell = (relationl.E[:, idxo]).reshape((1, relationl.D)) # rell: 1xD vector containing", "= parse_embeddings(embeddings) # Inputs idxl, idxo = T.iscalar('idxl'), T.iscalar('idxo') g = T.matrix('g') #", "score values. 
\"\"\" return theano.function([idxr, idxo], [simi], on_unused_input='ignore') def RankingScoreIdx(sl, sr, idxl, idxr,", "listrel: dictrellmean[i] = np.mean(dictrelres[i][0]) dictrelrmean[i] = np.mean(dictrelres[i][1]) dictrelgmean[i] = np.mean(dictrelres[i][0] + dictrelres[i][1]) dictrellmedian[i]", "a Theano function to measure the similarity score of all 'left' entities given", "for l, o, r in [(idxl[i], idxo[i], idxr[i]) for i in selection]: errl", "embedding, relationl, relationr = parse_embeddings(embeddings) # Inputs idxl, idxo = T.iscalar('idxl'), T.iscalar('idxo') #", "rell = (relationl.E[:, idxo]).reshape((1, relationl.D)) relr = (relationr.E[:, idxo]).reshape((1, relationr.D)) tmp = rightop(rhs,", "with RankRightFnIdx(). :param idxl: list of 'left' indices. :param idxr: list of 'right'", "lhs, rhs and rel indexes. :param sr: Theano function created with RankRightFnIdx(). :param", "rhs and rel indexes. :param sr: Theano function created with RankRightFnIdx(). :param idxl:", "idxr: list of 'right' indices. :param idxo: list of relation indices. \"\"\" errl", "test_matches = [], [] # Iterate over unique relation indexes for relidx in", "the rhs, over a list of lhs, rhs and rel indexes. :param sr:", "round(dres['microrhits@n'], 3))) logging.info('\\t-- global >> mean: %s, median: %s, hits@%s: %s%%' % (", "rhs = (embedding.E[:, idxr]).reshape((1, embedding.D)) rell = (relationl.E[:, idxo]).reshape((1, relationl.D)) relr = (relationr.E[:,", "2] != r] scores_l = (sl(r, o)[0]).flatten() scores_r = (sr(l, o)[0]).flatten() # Remove", "rell, relr = relationl.E[:, idxo].T, relationr.E[:, idxo].T energy = - fnsim(leftop(lhs, rell), rightop(rhs,", "idxr[i]) for i in selection]: errl += [np.argsort(np.argsort((sl(r, o)[0]).flatten())[::-1]).flatten()[l] + 1] errr +=", "relidxs: # Select the validation triples containing the 'relidx' predicate, and the corresponding", "(embedding.E[:, idxl]).reshape((1, embedding.D)) rhs = (embedding.E[:, idxr]).reshape((1, embedding.D)) rell = (relationl.E[:, idxo]).reshape((1, relationl.D))", "auc_pr(predictions=[], labels=[]): '''Computes the Area Under the Precision-Recall Curve (AUC-PR)''' predictions, labels =", "= rel2domain[relidx], rel2range[relidx] dr_domain = set(dr_domain) dr_range = set(dr_range) test_triples = [(l, o,", "'right' entities given couples of relation and 'left' entities (as index values). 
:param", "= [true_triples[i, 0] for i in inter_l if true_triples[i, 0] != l] scores_l", "dres = {} dres.update({'microlmean': np.mean(res[0])}) dres.update({'microlmedian': np.median(res[0])}) dres.update({'microlhits@n': np.mean(np.asarray(res[0]) <= n) * 100})", "in relidxs: # Select the validation triples containing the 'relidx' predicate, and the", "testridx[test_idxs], testoidx[test_idxs] r_test_targets = test_targets[test_idxs] r_test_energies = energyfn(r_testlidx, r_testridx, r_testoidx)[0] test_matches += classification_matches(r_test_energies,", "(embedding.E[:, :subtensorspec]).T else: lhs = embedding.E.T rhs = (embedding.E[:, idxr]).reshape((1, embedding.D)) rell =", "Inputs idxl, idxo = T.iscalar('idxl'), T.iscalar('idxo') # Graph lhs = (embedding.E[:, idxl]).reshape((1, embedding.D))", "true_triples[i, 0] != l] scores_l = (slro).flatten() scores_l[rmv_idx_l] = -np.inf errl += [np.argsort(np.argsort(-scores_l)).flatten()[l]", "round(dres['micrormedian'], 5), n, round(dres['microrhits@n'], 3))) logging.info('\\t-- global >> mean: %s, median: %s, hits@%s:", "in inter_l if true_triples[i, 0] != l] inter_r = [i for i in", "ret def classify(energies, threshold): classifications = np.asarray([1 if energy < threshold else 0", "errr = [], [] relidxs = np.unique(idxo) for relidx in relidxs: dr_domain, dr_range", "return classifications # # CLASSIFICATION FUNCTIONS # def EnergyFn(fnsim, embeddings, leftop, rightop): embedding,", "= np.median(dictrelres[i][1]) dictrelgmedian[i] = np.median(dictrelres[i][0] + dictrelres[i][1]) dictrellrn[i] = np.mean(np.asarray(dictrelres[i][0]) <= n) *", "# Select the test triples containing the 'relidx' predicate, and the corresponding target", "logging.info('### MACRO (%s):' % (tag)) logging.info('\\t-- left >> mean: %s, median: %s, hits@%s:", "= valid_targets[valid_idxs] # Evaluate the energies of those triples r_valid_energies = energyfn(r_validlidx, r_validridx,", "in io] rmv_idx_l = [true_triples[i, 0] for i in inter_l if true_triples[i, 0]", "round(dres['macrolmean'], 5), round(dres['macrolmedian'], 5), n, round(dres['macrolhits@n'], 3))) logging.info('\\t-- right >> mean: %s, median:", "RELATIONS DURING LEARNING # def FilteredRankingScoreIdx_DR(sl, sr, idxl, idxr, idxo, rel2domain, rel2range, illegal_dr_penalty=1e6,", "idxo = T.iscalar('idxl'), T.iscalar('idxo') g = T.matrix('g') # Graph lhs = (embedding.E[:, idxl]).reshape((1,", "None: # We compute the score only for a subset of entities lhs", "RankRightFnIdx(). :param idxl: list of 'left' indices. :param idxr: list of 'right' indices.", "o)[0]).flatten() scores_r = (sr(l, o)[0]).flatten() # Remove triples not in domain and range", "not in dr_domain] pen_idx_r = [cr for cr in range(len(scores_r)) if cr not", "a list of lhs, rhs and rel indexes. :param sr: Theano function created", "'left' entities (as index values). :param fnsim: similarity function (on Theano variables). 
:param", "dictrelres[i][1]) <= n) * 100 dres.update({'dictrelres': dictrelres}) dres.update({'dictrellmean': dictrellmean}) dres.update({'dictrelrmean': dictrelrmean}) dres.update({'dictrelgmean': dictrelgmean})", "rightop): embedding, relationl, relationr = parse_embeddings(embeddings) idxl, idxo, idxr = T.iscalar('idxl'), T.iscalar('idxo'), T.iscalar('idxr')", "accuracies = [np.mean(classification_matches(energies, targets, cutpoint)) * 100.0 for cutpoint in cutpoints] best_cutpoint =", "* r_subtensorspec) slro = sl(r, o, gl)[0] srlo = sr(l, o, gr)[0] inter_l", "% ( round(dres['macrogmean'], 5), round(dres['macrogmedian'], 5), n, round(dres['macroghits@n'], 3))) return dres # #", "g[1, :].T * prior.P[idxo, 1].T simi = simi - pen_simi return theano.function([idxl, idxo,", "* 100}) dres.update({'micrormean': np.mean(res[1])}) dres.update({'micrormedian': np.median(res[1])}) dres.update({'microrhits@n': np.mean(np.asarray(res[1]) <= n) * 100}) resg", "{} dictrelrrn = {} dictrelgrn = {} for i in listrel: dictrelres.update({i: [[],", "'left' operator. :param rightop: class for the 'right' operator. :param subtensorspec: only measure", "idxr, idxo, selection=[]): \"\"\" Similar to RankingScoreIdx, but works on a subset of", "= [] for l, o, r in zip(idxl, idxo, idxr): gl = schemaPenalty.schema_penalties_lr_fast(range(l_subtensorspec),", "(embedding.E[:, idxl]).reshape((1, embedding.D)) # lhs: 1xD vector containing the embedding of idxl if", "1] == o).reshape(-1,) ir = np.argwhere(true_triples[:, 2] == r).reshape(-1,) inter_l = [i for", "in il if i in io] rmv_idx_r = [true_triples[i, 2] for i in", "[j] for i, j in enumerate(res[1]): dictrelres[idxo[i]][1] += [j] for i in listrel:", "!= r] scores_r = (srlo).flatten() scores_r[rmv_idx_r] = -np.inf errr += [np.argsort(np.argsort(-scores_r)).flatten()[r] + 1]", "return auc def auc_roc(predictions=[], labels=[]): '''Computes the Area Under the Receiver Operating Characteristic", "relationr = parse_embeddings(embeddings) # Inputs idxl, idxo = T.iscalar('idxl'), T.iscalar('idxo') # Graph lhs", "b = relr(rhs) # Negative Energy simi = fnsim(tmp.reshape((1, tmp.shape[1])), rightop(rhs, relr)) #", "parameter. \"\"\" errl, errr = [], [] for l, o, r in [(idxl[i],", "true_triples, relation2domainSet, relation2rangeSet, schemaPenalty, l_subtensorspec=None, r_subtensorspec=None): errl = [] errr = [] for", "on_unused_input='ignore') # # LEVERAGING RANGE AND DOMAIN RELATIONS DURING LEARNING # def FilteredRankingScoreIdx_DR(sl,", "{} dictrelgrn = {} for i in listrel: dictrelres.update({i: [[], []]}) for i,", "true_triples is not None: il = np.argwhere(true_triples[:, 0] == l).reshape(-1,) io = np.argwhere(true_triples[:,", "= [i for i in il if i in io] rmv_idx_r += [true_triples[i,", "and 'left' entities (as index values). 
:param fnsim: similarity function (on Theano variables).", "= res[0] + res[1] dres = {} dres.update({'microlmean': np.mean(res[0])}) dres.update({'microlmedian': np.median(res[0])}) dres.update({'microlhits@n': np.mean(np.asarray(res[0])", "def RankingScoreIdx(sl, sr, idxl, idxr, idxo): \"\"\" This function computes the rank list", "RankLeftFnIdx_Schema(fnsim, embeddings, prior, leftop, rightop, subtensorspec=None): embedding, relationl, relationr = parse_embeddings(embeddings) # Inputs", "# Graph if subtensorspec is not None: # We compute the score only", "idxr, idxo], [energy], on_unused_input='ignore') def EnergyVecFn(fnsim, embeddings, leftop, rightop): embedding, relationl, relationr =", "is not None: il = np.argwhere(true_triples[:, 0] == l).reshape(-1,) io = np.argwhere(true_triples[:, 1]", "logging.info('\\t-- global >> mean: %s, median: %s, hits@%s: %s%%' % ( round(dres['macrogmean'], 5),", "% (tag)) logging.info('\\t-- left >> mean: %s, median: %s, hits@%s: %s%%' % (", "idxr]).reshape((1, embedding.D)) rell = (relationl.E[:, idxo]).reshape((1, relationl.D)) relr = (relationr.E[:, idxo]).reshape((1, relationr.D)) energy", "embedding.E[:, idxr].T rell, relr = relationl.E[:, idxo].T, relationr.E[:, idxo].T energy = - fnsim(leftop(lhs,", "logging.info('### MICRO (%s):' % (tag)) logging.info('\\t-- left >> mean: %s, median: %s, hits@%s:", "relr) simi = fnsim(leftop(lhs, rell), tmp.reshape((1, tmp.shape[1]))) pen_simi = g[0, :].T * prior.P[idxo,", "errr def FilteredRankingScoreIdx(sl, sr, idxl, idxr, idxo, true_triples): \"\"\" This function computes the", "= np.asarray(predictions), np.asarray(labels) precision, recall, threshold = metrics.precision_recall_curve(labels, predictions) auc = metrics.auc(recall, precision)", "idxr = T.iscalar('idxl'), T.iscalar('idxo'), T.iscalar('idxr') lhs = (embedding.E[:, idxl]).reshape((1, embedding.D)) rhs = (embedding.E[:,", "cr not in dr_range] scores_l[rmv_idx_l] = -np.inf scores_r[rmv_idx_r] = -np.inf scores_l[pen_idx_l] -= illegal_dr_penalty", "test_triples: rmv_idx_l, rmv_idx_r = [], [] # Remove triples from true_triples from ranking", "else: lhs = embedding.E.T rhs = (embedding.E[:, idxr]).reshape((1, embedding.D)) rell = (relationl.E[:, idxo]).reshape((1,", "of the relation member. Theano function output. :output simi: vector of score values." ]
[ ") @pytest.mark.parametrize(\"text\", [\"foo\", \"<b>bar</b>\", \"வணக்கம்\"]) def test_good_markup(text): assert not manimpango.MarkupUtils.validate( text, ), f\"{text}", "\"sed do eiusmod tempor incididunt ut labore et dolore\" \"magna aliqua. Ut enim", "loc.exists() def test_markup_style(tmpdir): test_case = CASES_DIR / \"hello_blue_world_green.svg\" expected = tmpdir / \"expected.svg\"", "Path import pytest import manimpango from . import CASES_DIR from ._manim import MarkupText", "# don't know how to verify this correctly # it varies upon diffent", "\"in reprehenderit in voluptate velit esse cillum\" \"dolore eu fugiat nulla pariatur. Excepteur", "loc.exists() MarkupText(ipsum_text, justify=True, filename=str(loc)) assert loc.exists() def test_markup_indent(tmpdir): # don't know how to", "@pytest.mark.parametrize(\"text\", [\"foo\", \"<b>bar</b>\", \"வணக்கம்\"]) def test_good_markup(text): assert not manimpango.MarkupUtils.validate( text, ), f\"{text} should", "= Path(tmpdir, \"test.svg\") assert not loc.exists() MarkupText(ipsum_text, justify=True, filename=str(loc)) assert loc.exists() def test_markup_indent(tmpdir):", "from pathlib import Path import pytest import manimpango from . import CASES_DIR from", "foreground='BLUE'>Hello</span>\\n<span foreground='GREEN'>World</span>\" MarkupText( text, filename=str(expected), ) s = SVGStyleTester(gotSVG=expected, expectedSVG=test_case) assert len(s.got_svg_style) ==", "MarkupText( text, filename=str(expected), ) s = SVGStyleTester(gotSVG=expected, expectedSVG=test_case) assert len(s.got_svg_style) == len(s.expected_svg_style) assert", "'<span underline=\"error\"><b><i>Hello Manim</i></b></span>', filename=str(loc) ) assert loc.exists() def test_markup_justify(tmpdir): # don't know how", "pathlib import Path import pytest import manimpango from . import CASES_DIR from ._manim", "sit amet, <i>consectetur</i> adipiscing elit,\" \"sed do eiusmod tempor incididunt ut labore et", "MarkupText(ipsum_text, justify=True, filename=str(loc)) assert loc.exists() def test_markup_indent(tmpdir): # don't know how to verify", "know how to verify this correctly # it varies upon diffent system so,", "filename=str(loc), ) assert loc.exists() def test_markup_style(tmpdir): test_case = CASES_DIR / \"hello_blue_world_green.svg\" expected =", "tmpdir = Path(tmpdir) wrapped = tmpdir / \"wrap.svg\" nowrap = tmpdir / \"nowarap.svg\"", "Path(tmpdir) wrapped = tmpdir / \"wrap.svg\" nowrap = tmpdir / \"nowarap.svg\" MarkupText(ipsum_text, wrap_text=False,", ") assert loc.exists() def test_markup_style(tmpdir): test_case = CASES_DIR / \"hello_blue_world_green.svg\" expected = tmpdir", ") s = SVGStyleTester(gotSVG=expected, expectedSVG=test_case) assert len(s.got_svg_style) == len(s.expected_svg_style) assert s.got_svg_style == s.expected_svg_style", "assert loc.exists() def test_markup_justify(tmpdir): # don't know how to verify this correctly #", "1 char 14\", ), ], ) def test_bad_markup_error_message(text, error): assert manimpango.MarkupUtils.validate(text) == error", "@pytest.mark.parametrize(\"text\", [\"<b>foo\", \"<xyz>foo</xyz>\"]) def test_bad_markup(text): assert manimpango.MarkupUtils.validate( text ), f\"{text} should fail validation", "-*- coding: utf-8 -*- from pathlib import Path import pytest import manimpango from", "loc.exists() MarkupText( ipsum_text, alignment=manimpango.Alignment.CENTER, filename=str(loc), ) assert loc.exists() def test_markup_style(tmpdir): test_case = CASES_DIR", "\"dolore eu fugiat nulla pariatur. 
Excepteur sint\" \"occaecat cupidatat non proident, sunt in", "text ), f\"{text} should fail validation (unbalanced tags)\" @pytest.mark.parametrize( \"text,error\", [ ( \"<b>foo\",", "\"test.svg\") assert not loc.exists() MarkupText( ipsum_text, alignment=manimpango.Alignment.CENTER, filename=str(loc), ) assert loc.exists() def test_markup_style(tmpdir):", "def test_good_markup(text): assert not manimpango.MarkupUtils.validate( text, ), f\"{text} should not fail validation\" @pytest.mark.parametrize(\"text\",", "verify this correctly # it varies upon diffent system so, we are #", ". import CASES_DIR from ._manim import MarkupText from .svg_tester import SVGStyleTester ipsum_text =", "runs loc = Path(tmpdir, \"test.svg\") assert not loc.exists() MarkupText(ipsum_text, indent=10, filename=str(loc)) assert loc.exists()", "nowrap = tmpdir / \"nowarap.svg\" MarkupText(ipsum_text, wrap_text=False, filename=str(nowrap)) MarkupText(ipsum_text, filename=str(wrapped)) assert wrapped.read_text() !=", "sunt in culpa qui\" \"officia deserunt mollit anim id est laborum.\" ) @pytest.mark.parametrize(\"text\",", "text, filename=str(expected), ) s = SVGStyleTester(gotSVG=expected, expectedSVG=test_case) assert len(s.got_svg_style) == len(s.expected_svg_style) assert s.got_svg_style", "Path(tmpdir, \"test.svg\") assert not loc.exists() MarkupText( '<span underline=\"error\"><b><i>Hello Manim</i></b></span>', filename=str(loc) ) assert loc.exists()", "Ut enim <b>ad</b> minim veniam, quis nostrud\" \"exercitation ullamco laboris nisi ut aliquip\"", "import pytest import manimpango from . import CASES_DIR from ._manim import MarkupText from", "), ], ) def test_bad_markup_error_message(text, error): assert manimpango.MarkupUtils.validate(text) == error def test_markup_text(tmpdir): loc", "CASES_DIR / \"hello_blue_world_green.svg\" expected = tmpdir / \"expected.svg\" text = \"<span foreground='BLUE'>Hello</span>\\n<span foreground='GREEN'>World</span>\"", "error def test_markup_text(tmpdir): loc = Path(tmpdir, \"test.svg\") assert not loc.exists() MarkupText( '<span underline=\"error\"><b><i>Hello", "], ) def test_bad_markup_error_message(text, error): assert manimpango.MarkupUtils.validate(text) == error def test_markup_text(tmpdir): loc =", "wrapped = tmpdir / \"wrap.svg\" nowrap = tmpdir / \"nowarap.svg\" MarkupText(ipsum_text, wrap_text=False, filename=str(nowrap))", "veniam, quis nostrud\" \"exercitation ullamco laboris nisi ut aliquip\" \"ex ea commodo consequat.", "tag 'xyz' on line 1 char 14\", ), ], ) def test_bad_markup_error_message(text, error):", "( \"<b>Lorem ipsum dolor</b> sit amet, <i>consectetur</i> adipiscing elit,\" \"sed do eiusmod tempor", "not loc.exists() MarkupText(ipsum_text, indent=10, filename=str(loc)) assert loc.exists() def test_markup_alignment(tmpdir): # don't know how", "dolore\" \"magna aliqua. 
Ut enim <b>ad</b> minim veniam, quis nostrud\" \"exercitation ullamco laboris", "<i>consectetur</i> adipiscing elit,\" \"sed do eiusmod tempor incididunt ut labore et dolore\" \"magna", "assert s.got_svg_style == s.expected_svg_style def test_wrap_text(tmpdir): tmpdir = Path(tmpdir) wrapped = tmpdir /", "char 14\", ), ], ) def test_bad_markup_error_message(text, error): assert manimpango.MarkupUtils.validate(text) == error def", "check whether it runs loc = Path(tmpdir, \"test.svg\") assert not loc.exists() MarkupText(ipsum_text, indent=10,", "from .svg_tester import SVGStyleTester ipsum_text = ( \"<b>Lorem ipsum dolor</b> sit amet, <i>consectetur</i>", "<b>ad</b> minim veniam, quis nostrud\" \"exercitation ullamco laboris nisi ut aliquip\" \"ex ea", "“b”\", ), ( \"<xyz>foo</xyz>\", \"Unknown tag 'xyz' on line 1 char 14\", ),", "aute irure dolor\" \"in reprehenderit in voluptate velit esse cillum\" \"dolore eu fugiat", "how to verify this correctly # it varies upon diffent system so, we", "system so, we are # just check whether it runs loc = Path(tmpdir,", "assert not manimpango.MarkupUtils.validate( text, ), f\"{text} should not fail validation\" @pytest.mark.parametrize(\"text\", [\"<b>foo\", \"<xyz>foo</xyz>\"])", "underline=\"error\"><b><i>Hello Manim</i></b></span>', filename=str(loc) ) assert loc.exists() def test_markup_justify(tmpdir): # don't know how to", "\"<xyz>foo</xyz>\", \"Unknown tag 'xyz' on line 1 char 14\", ), ], ) def", "error): assert manimpango.MarkupUtils.validate(text) == error def test_markup_text(tmpdir): loc = Path(tmpdir, \"test.svg\") assert not", "loc.exists() MarkupText(ipsum_text, indent=10, filename=str(loc)) assert loc.exists() def test_markup_alignment(tmpdir): # don't know how to", ") def test_bad_markup_error_message(text, error): assert manimpango.MarkupUtils.validate(text) == error def test_markup_text(tmpdir): loc = Path(tmpdir,", "= ( \"<b>Lorem ipsum dolor</b> sit amet, <i>consectetur</i> adipiscing elit,\" \"sed do eiusmod", "id est laborum.\" ) @pytest.mark.parametrize(\"text\", [\"foo\", \"<b>bar</b>\", \"வணக்கம்\"]) def test_good_markup(text): assert not manimpango.MarkupUtils.validate(", "varies upon diffent system so, we are # just check whether it runs", "validation\" @pytest.mark.parametrize(\"text\", [\"<b>foo\", \"<xyz>foo</xyz>\"]) def test_bad_markup(text): assert manimpango.MarkupUtils.validate( text ), f\"{text} should fail", "\"exercitation ullamco laboris nisi ut aliquip\" \"ex ea commodo consequat. Duis aute irure", "f\"{text} should not fail validation\" @pytest.mark.parametrize(\"text\", [\"<b>foo\", \"<xyz>foo</xyz>\"]) def test_bad_markup(text): assert manimpango.MarkupUtils.validate( text", "not loc.exists() MarkupText( ipsum_text, alignment=manimpango.Alignment.CENTER, filename=str(loc), ) assert loc.exists() def test_markup_style(tmpdir): test_case =", "coding: utf-8 -*- from pathlib import Path import pytest import manimpango from .", "def test_markup_justify(tmpdir): # don't know how to verify this correctly # it varies", "tmpdir / \"wrap.svg\" nowrap = tmpdir / \"nowarap.svg\" MarkupText(ipsum_text, wrap_text=False, filename=str(nowrap)) MarkupText(ipsum_text, filename=str(wrapped))", "tempor incididunt ut labore et dolore\" \"magna aliqua. 
Ut enim <b>ad</b> minim veniam,", ".svg_tester import SVGStyleTester ipsum_text = ( \"<b>Lorem ipsum dolor</b> sit amet, <i>consectetur</i> adipiscing", "deserunt mollit anim id est laborum.\" ) @pytest.mark.parametrize(\"text\", [\"foo\", \"<b>bar</b>\", \"வணக்கம்\"]) def test_good_markup(text):", "assert manimpango.MarkupUtils.validate(text) == error def test_markup_text(tmpdir): loc = Path(tmpdir, \"test.svg\") assert not loc.exists()", "loc.exists() def test_markup_justify(tmpdir): # don't know how to verify this correctly # it", "pariatur. Excepteur sint\" \"occaecat cupidatat non proident, sunt in culpa qui\" \"officia deserunt", "\"வணக்கம்\"]) def test_good_markup(text): assert not manimpango.MarkupUtils.validate( text, ), f\"{text} should not fail validation\"", "justify=True, filename=str(loc)) assert loc.exists() def test_markup_indent(tmpdir): # don't know how to verify this", "import SVGStyleTester ipsum_text = ( \"<b>Lorem ipsum dolor</b> sit amet, <i>consectetur</i> adipiscing elit,\"", "._manim import MarkupText from .svg_tester import SVGStyleTester ipsum_text = ( \"<b>Lorem ipsum dolor</b>", "\"test.svg\") assert not loc.exists() MarkupText( '<span underline=\"error\"><b><i>Hello Manim</i></b></span>', filename=str(loc) ) assert loc.exists() def", "/ \"wrap.svg\" nowrap = tmpdir / \"nowarap.svg\" MarkupText(ipsum_text, wrap_text=False, filename=str(nowrap)) MarkupText(ipsum_text, filename=str(wrapped)) assert", "aliquip\" \"ex ea commodo consequat. Duis aute irure dolor\" \"in reprehenderit in voluptate", "ipsum_text, alignment=manimpango.Alignment.CENTER, filename=str(loc), ) assert loc.exists() def test_markup_style(tmpdir): test_case = CASES_DIR / \"hello_blue_world_green.svg\"", "/ \"expected.svg\" text = \"<span foreground='BLUE'>Hello</span>\\n<span foreground='GREEN'>World</span>\" MarkupText( text, filename=str(expected), ) s =", "upon diffent system so, we are # just check whether it runs loc", "est laborum.\" ) @pytest.mark.parametrize(\"text\", [\"foo\", \"<b>bar</b>\", \"வணக்கம்\"]) def test_good_markup(text): assert not manimpango.MarkupUtils.validate( text,", "= Path(tmpdir, \"test.svg\") assert not loc.exists() MarkupText( ipsum_text, alignment=manimpango.Alignment.CENTER, filename=str(loc), ) assert loc.exists()", "SVGStyleTester(gotSVG=expected, expectedSVG=test_case) assert len(s.got_svg_style) == len(s.expected_svg_style) assert s.got_svg_style == s.expected_svg_style def test_wrap_text(tmpdir): tmpdir", "pytest import manimpango from . 
import CASES_DIR from ._manim import MarkupText from .svg_tester", "should fail validation (unbalanced tags)\" @pytest.mark.parametrize( \"text,error\", [ ( \"<b>foo\", \"Error on line", "assert not loc.exists() MarkupText(ipsum_text, indent=10, filename=str(loc)) assert loc.exists() def test_markup_alignment(tmpdir): # don't know", "Path(tmpdir, \"test.svg\") assert not loc.exists() MarkupText(ipsum_text, indent=10, filename=str(loc)) assert loc.exists() def test_markup_alignment(tmpdir): #", "assert not loc.exists() MarkupText( ipsum_text, alignment=manimpango.Alignment.CENTER, filename=str(loc), ) assert loc.exists() def test_markup_style(tmpdir): test_case", "), f\"{text} should fail validation (unbalanced tags)\" @pytest.mark.parametrize( \"text,error\", [ ( \"<b>foo\", \"Error", "runs loc = Path(tmpdir, \"test.svg\") assert not loc.exists() MarkupText( ipsum_text, alignment=manimpango.Alignment.CENTER, filename=str(loc), )", "Path(tmpdir, \"test.svg\") assert not loc.exists() MarkupText(ipsum_text, justify=True, filename=str(loc)) assert loc.exists() def test_markup_indent(tmpdir): #", "test_case = CASES_DIR / \"hello_blue_world_green.svg\" expected = tmpdir / \"expected.svg\" text = \"<span", "= SVGStyleTester(gotSVG=expected, expectedSVG=test_case) assert len(s.got_svg_style) == len(s.expected_svg_style) assert s.got_svg_style == s.expected_svg_style def test_wrap_text(tmpdir):", "laborum.\" ) @pytest.mark.parametrize(\"text\", [\"foo\", \"<b>bar</b>\", \"வணக்கம்\"]) def test_good_markup(text): assert not manimpango.MarkupUtils.validate( text, ),", "cupidatat non proident, sunt in culpa qui\" \"officia deserunt mollit anim id est", "), f\"{text} should not fail validation\" @pytest.mark.parametrize(\"text\", [\"<b>foo\", \"<xyz>foo</xyz>\"]) def test_bad_markup(text): assert manimpango.MarkupUtils.validate(", "loc = Path(tmpdir, \"test.svg\") assert not loc.exists() MarkupText(ipsum_text, indent=10, filename=str(loc)) assert loc.exists() def", "text = \"<span foreground='BLUE'>Hello</span>\\n<span foreground='GREEN'>World</span>\" MarkupText( text, filename=str(expected), ) s = SVGStyleTester(gotSVG=expected, expectedSVG=test_case)", "open element is “b”\", ), ( \"<xyz>foo</xyz>\", \"Unknown tag 'xyz' on line 1", "ullamco laboris nisi ut aliquip\" \"ex ea commodo consequat. Duis aute irure dolor\"", "“markup” was closed, \" \"but the currently open element is “b”\", ), (", "eiusmod tempor incididunt ut labore et dolore\" \"magna aliqua. Ut enim <b>ad</b> minim", "Duis aute irure dolor\" \"in reprehenderit in voluptate velit esse cillum\" \"dolore eu", "culpa qui\" \"officia deserunt mollit anim id est laborum.\" ) @pytest.mark.parametrize(\"text\", [\"foo\", \"<b>bar</b>\",", "element is “b”\", ), ( \"<xyz>foo</xyz>\", \"Unknown tag 'xyz' on line 1 char", "from . 
import CASES_DIR from ._manim import MarkupText from .svg_tester import SVGStyleTester ipsum_text", "# just check whether it runs loc = Path(tmpdir, \"test.svg\") assert not loc.exists()", "loc = Path(tmpdir, \"test.svg\") assert not loc.exists() MarkupText(ipsum_text, justify=True, filename=str(loc)) assert loc.exists() def", "14\", ), ], ) def test_bad_markup_error_message(text, error): assert manimpango.MarkupUtils.validate(text) == error def test_markup_text(tmpdir):", "loc = Path(tmpdir, \"test.svg\") assert not loc.exists() MarkupText( ipsum_text, alignment=manimpango.Alignment.CENTER, filename=str(loc), ) assert", "MarkupText(ipsum_text, indent=10, filename=str(loc)) assert loc.exists() def test_markup_alignment(tmpdir): # don't know how to verify", "= CASES_DIR / \"hello_blue_world_green.svg\" expected = tmpdir / \"expected.svg\" text = \"<span foreground='BLUE'>Hello</span>\\n<span", "test_markup_indent(tmpdir): # don't know how to verify this correctly # it varies upon", "should not fail validation\" @pytest.mark.parametrize(\"text\", [\"<b>foo\", \"<xyz>foo</xyz>\"]) def test_bad_markup(text): assert manimpango.MarkupUtils.validate( text ),", "labore et dolore\" \"magna aliqua. Ut enim <b>ad</b> minim veniam, quis nostrud\" \"exercitation", "test_markup_style(tmpdir): test_case = CASES_DIR / \"hello_blue_world_green.svg\" expected = tmpdir / \"expected.svg\" text =", "reprehenderit in voluptate velit esse cillum\" \"dolore eu fugiat nulla pariatur. Excepteur sint\"", "the currently open element is “b”\", ), ( \"<xyz>foo</xyz>\", \"Unknown tag 'xyz' on", "manimpango.MarkupUtils.validate( text ), f\"{text} should fail validation (unbalanced tags)\" @pytest.mark.parametrize( \"text,error\", [ (", "check whether it runs loc = Path(tmpdir, \"test.svg\") assert not loc.exists() MarkupText(ipsum_text, justify=True,", "fail validation (unbalanced tags)\" @pytest.mark.parametrize( \"text,error\", [ ( \"<b>foo\", \"Error on line 1", "expectedSVG=test_case) assert len(s.got_svg_style) == len(s.expected_svg_style) assert s.got_svg_style == s.expected_svg_style def test_wrap_text(tmpdir): tmpdir =", "s.expected_svg_style def test_wrap_text(tmpdir): tmpdir = Path(tmpdir) wrapped = tmpdir / \"wrap.svg\" nowrap =", "f\"{text} should fail validation (unbalanced tags)\" @pytest.mark.parametrize( \"text,error\", [ ( \"<b>foo\", \"Error on", "text, ), f\"{text} should not fail validation\" @pytest.mark.parametrize(\"text\", [\"<b>foo\", \"<xyz>foo</xyz>\"]) def test_bad_markup(text): assert", "this correctly # it varies upon diffent system so, we are # just", "\"Unknown tag 'xyz' on line 1 char 14\", ), ], ) def test_bad_markup_error_message(text,", "[\"foo\", \"<b>bar</b>\", \"வணக்கம்\"]) def test_good_markup(text): assert not manimpango.MarkupUtils.validate( text, ), f\"{text} should not", "def test_markup_style(tmpdir): test_case = CASES_DIR / \"hello_blue_world_green.svg\" expected = tmpdir / \"expected.svg\" text", "/ \"hello_blue_world_green.svg\" expected = tmpdir / \"expected.svg\" text = \"<span foreground='BLUE'>Hello</span>\\n<span foreground='GREEN'>World</span>\" MarkupText(", "manimpango from . import CASES_DIR from ._manim import MarkupText from .svg_tester import SVGStyleTester", "ut aliquip\" \"ex ea commodo consequat. 
Duis aute irure dolor\" \"in reprehenderit in", "dolor</b> sit amet, <i>consectetur</i> adipiscing elit,\" \"sed do eiusmod tempor incididunt ut labore", "len(s.expected_svg_style) assert s.got_svg_style == s.expected_svg_style def test_wrap_text(tmpdir): tmpdir = Path(tmpdir) wrapped = tmpdir", "== s.expected_svg_style def test_wrap_text(tmpdir): tmpdir = Path(tmpdir) wrapped = tmpdir / \"wrap.svg\" nowrap", "non proident, sunt in culpa qui\" \"officia deserunt mollit anim id est laborum.\"", "assert not loc.exists() MarkupText( '<span underline=\"error\"><b><i>Hello Manim</i></b></span>', filename=str(loc) ) assert loc.exists() def test_markup_justify(tmpdir):", "ipsum_text = ( \"<b>Lorem ipsum dolor</b> sit amet, <i>consectetur</i> adipiscing elit,\" \"sed do", "check whether it runs loc = Path(tmpdir, \"test.svg\") assert not loc.exists() MarkupText( ipsum_text,", "utf-8 -*- from pathlib import Path import pytest import manimpango from . import", "irure dolor\" \"in reprehenderit in voluptate velit esse cillum\" \"dolore eu fugiat nulla", "is “b”\", ), ( \"<xyz>foo</xyz>\", \"Unknown tag 'xyz' on line 1 char 14\",", "assert loc.exists() def test_markup_style(tmpdir): test_case = CASES_DIR / \"hello_blue_world_green.svg\" expected = tmpdir /", "incididunt ut labore et dolore\" \"magna aliqua. Ut enim <b>ad</b> minim veniam, quis", "= tmpdir / \"nowarap.svg\" MarkupText(ipsum_text, wrap_text=False, filename=str(nowrap)) MarkupText(ipsum_text, filename=str(wrapped)) assert wrapped.read_text() != nowrap.read_text()", "\"ex ea commodo consequat. Duis aute irure dolor\" \"in reprehenderit in voluptate velit", "filename=str(loc) ) assert loc.exists() def test_markup_justify(tmpdir): # don't know how to verify this", "assert manimpango.MarkupUtils.validate( text ), f\"{text} should fail validation (unbalanced tags)\" @pytest.mark.parametrize( \"text,error\", [", "closed, \" \"but the currently open element is “b”\", ), ( \"<xyz>foo</xyz>\", \"Unknown", "minim veniam, quis nostrud\" \"exercitation ullamco laboris nisi ut aliquip\" \"ex ea commodo", "\"hello_blue_world_green.svg\" expected = tmpdir / \"expected.svg\" text = \"<span foreground='BLUE'>Hello</span>\\n<span foreground='GREEN'>World</span>\" MarkupText( text,", "MarkupText( '<span underline=\"error\"><b><i>Hello Manim</i></b></span>', filename=str(loc) ) assert loc.exists() def test_markup_justify(tmpdir): # don't know", "laboris nisi ut aliquip\" \"ex ea commodo consequat. Duis aute irure dolor\" \"in", "test_bad_markup_error_message(text, error): assert manimpango.MarkupUtils.validate(text) == error def test_markup_text(tmpdir): loc = Path(tmpdir, \"test.svg\") assert", "aliqua. 
Ut enim <b>ad</b> minim veniam, quis nostrud\" \"exercitation ullamco laboris nisi ut", "anim id est laborum.\" ) @pytest.mark.parametrize(\"text\", [\"foo\", \"<b>bar</b>\", \"வணக்கம்\"]) def test_good_markup(text): assert not", "def test_markup_alignment(tmpdir): # don't know how to verify this correctly # it varies", "it varies upon diffent system so, we are # just check whether it", "not manimpango.MarkupUtils.validate( text, ), f\"{text} should not fail validation\" @pytest.mark.parametrize(\"text\", [\"<b>foo\", \"<xyz>foo</xyz>\"]) def", "def test_wrap_text(tmpdir): tmpdir = Path(tmpdir) wrapped = tmpdir / \"wrap.svg\" nowrap = tmpdir", "tags)\" @pytest.mark.parametrize( \"text,error\", [ ( \"<b>foo\", \"Error on line 1 char 23: Element", "whether it runs loc = Path(tmpdir, \"test.svg\") assert not loc.exists() MarkupText( ipsum_text, alignment=manimpango.Alignment.CENTER,", "in culpa qui\" \"officia deserunt mollit anim id est laborum.\" ) @pytest.mark.parametrize(\"text\", [\"foo\",", "filename=str(expected), ) s = SVGStyleTester(gotSVG=expected, expectedSVG=test_case) assert len(s.got_svg_style) == len(s.expected_svg_style) assert s.got_svg_style ==", "(unbalanced tags)\" @pytest.mark.parametrize( \"text,error\", [ ( \"<b>foo\", \"Error on line 1 char 23:", "= Path(tmpdir, \"test.svg\") assert not loc.exists() MarkupText( '<span underline=\"error\"><b><i>Hello Manim</i></b></span>', filename=str(loc) ) assert", "Path(tmpdir, \"test.svg\") assert not loc.exists() MarkupText( ipsum_text, alignment=manimpango.Alignment.CENTER, filename=str(loc), ) assert loc.exists() def", "test_markup_text(tmpdir): loc = Path(tmpdir, \"test.svg\") assert not loc.exists() MarkupText( '<span underline=\"error\"><b><i>Hello Manim</i></b></span>', filename=str(loc)", "was closed, \" \"but the currently open element is “b”\", ), ( \"<xyz>foo</xyz>\",", "= tmpdir / \"wrap.svg\" nowrap = tmpdir / \"nowarap.svg\" MarkupText(ipsum_text, wrap_text=False, filename=str(nowrap)) MarkupText(ipsum_text,", "esse cillum\" \"dolore eu fugiat nulla pariatur. Excepteur sint\" \"occaecat cupidatat non proident,", "Excepteur sint\" \"occaecat cupidatat non proident, sunt in culpa qui\" \"officia deserunt mollit", "manimpango.MarkupUtils.validate(text) == error def test_markup_text(tmpdir): loc = Path(tmpdir, \"test.svg\") assert not loc.exists() MarkupText(", "\"magna aliqua. 
Ut enim <b>ad</b> minim veniam, quis nostrud\" \"exercitation ullamco laboris nisi", "def test_markup_text(tmpdir): loc = Path(tmpdir, \"test.svg\") assert not loc.exists() MarkupText( '<span underline=\"error\"><b><i>Hello Manim</i></b></span>',", "\"<span foreground='BLUE'>Hello</span>\\n<span foreground='GREEN'>World</span>\" MarkupText( text, filename=str(expected), ) s = SVGStyleTester(gotSVG=expected, expectedSVG=test_case) assert len(s.got_svg_style)", "to verify this correctly # it varies upon diffent system so, we are", "= tmpdir / \"expected.svg\" text = \"<span foreground='BLUE'>Hello</span>\\n<span foreground='GREEN'>World</span>\" MarkupText( text, filename=str(expected), )", "ipsum dolor</b> sit amet, <i>consectetur</i> adipiscing elit,\" \"sed do eiusmod tempor incididunt ut", "assert len(s.got_svg_style) == len(s.expected_svg_style) assert s.got_svg_style == s.expected_svg_style def test_wrap_text(tmpdir): tmpdir = Path(tmpdir)", "alignment=manimpango.Alignment.CENTER, filename=str(loc), ) assert loc.exists() def test_markup_style(tmpdir): test_case = CASES_DIR / \"hello_blue_world_green.svg\" expected", "proident, sunt in culpa qui\" \"officia deserunt mollit anim id est laborum.\" )", "ea commodo consequat. Duis aute irure dolor\" \"in reprehenderit in voluptate velit esse", "adipiscing elit,\" \"sed do eiusmod tempor incididunt ut labore et dolore\" \"magna aliqua.", "whether it runs loc = Path(tmpdir, \"test.svg\") assert not loc.exists() MarkupText(ipsum_text, justify=True, filename=str(loc))", "it runs loc = Path(tmpdir, \"test.svg\") assert not loc.exists() MarkupText( ipsum_text, alignment=manimpango.Alignment.CENTER, filename=str(loc),", "expected = tmpdir / \"expected.svg\" text = \"<span foreground='BLUE'>Hello</span>\\n<span foreground='GREEN'>World</span>\" MarkupText( text, filename=str(expected),", "), ( \"<xyz>foo</xyz>\", \"Unknown tag 'xyz' on line 1 char 14\", ), ],", "MarkupText( ipsum_text, alignment=manimpango.Alignment.CENTER, filename=str(loc), ) assert loc.exists() def test_markup_style(tmpdir): test_case = CASES_DIR /", "23: Element “markup” was closed, \" \"but the currently open element is “b”\",", "et dolore\" \"magna aliqua. 
Ut enim <b>ad</b> minim veniam, quis nostrud\" \"exercitation ullamco", "\"test.svg\") assert not loc.exists() MarkupText(ipsum_text, justify=True, filename=str(loc)) assert loc.exists() def test_markup_indent(tmpdir): # don't", "mollit anim id est laborum.\" ) @pytest.mark.parametrize(\"text\", [\"foo\", \"<b>bar</b>\", \"வணக்கம்\"]) def test_good_markup(text): assert", "from ._manim import MarkupText from .svg_tester import SVGStyleTester ipsum_text = ( \"<b>Lorem ipsum", "test_markup_alignment(tmpdir): # don't know how to verify this correctly # it varies upon", "don't know how to verify this correctly # it varies upon diffent system", "runs loc = Path(tmpdir, \"test.svg\") assert not loc.exists() MarkupText(ipsum_text, justify=True, filename=str(loc)) assert loc.exists()", "foreground='GREEN'>World</span>\" MarkupText( text, filename=str(expected), ) s = SVGStyleTester(gotSVG=expected, expectedSVG=test_case) assert len(s.got_svg_style) == len(s.expected_svg_style)", "not fail validation\" @pytest.mark.parametrize(\"text\", [\"<b>foo\", \"<xyz>foo</xyz>\"]) def test_bad_markup(text): assert manimpango.MarkupUtils.validate( text ), f\"{text}", "assert not loc.exists() MarkupText(ipsum_text, justify=True, filename=str(loc)) assert loc.exists() def test_markup_indent(tmpdir): # don't know", "char 23: Element “markup” was closed, \" \"but the currently open element is", "test_bad_markup(text): assert manimpango.MarkupUtils.validate( text ), f\"{text} should fail validation (unbalanced tags)\" @pytest.mark.parametrize( \"text,error\",", "\"<b>Lorem ipsum dolor</b> sit amet, <i>consectetur</i> adipiscing elit,\" \"sed do eiusmod tempor incididunt", "( \"<xyz>foo</xyz>\", \"Unknown tag 'xyz' on line 1 char 14\", ), ], )", "\"occaecat cupidatat non proident, sunt in culpa qui\" \"officia deserunt mollit anim id", "on line 1 char 23: Element “markup” was closed, \" \"but the currently", "cillum\" \"dolore eu fugiat nulla pariatur. Excepteur sint\" \"occaecat cupidatat non proident, sunt", "'xyz' on line 1 char 14\", ), ], ) def test_bad_markup_error_message(text, error): assert", "== len(s.expected_svg_style) assert s.got_svg_style == s.expected_svg_style def test_wrap_text(tmpdir): tmpdir = Path(tmpdir) wrapped =", "# it varies upon diffent system so, we are # just check whether", "line 1 char 14\", ), ], ) def test_bad_markup_error_message(text, error): assert manimpango.MarkupUtils.validate(text) ==", "not loc.exists() MarkupText( '<span underline=\"error\"><b><i>Hello Manim</i></b></span>', filename=str(loc) ) assert loc.exists() def test_markup_justify(tmpdir): #", "elit,\" \"sed do eiusmod tempor incididunt ut labore et dolore\" \"magna aliqua. Ut", "whether it runs loc = Path(tmpdir, \"test.svg\") assert not loc.exists() MarkupText(ipsum_text, indent=10, filename=str(loc))", "\"officia deserunt mollit anim id est laborum.\" ) @pytest.mark.parametrize(\"text\", [\"foo\", \"<b>bar</b>\", \"வணக்கம்\"]) def", "len(s.got_svg_style) == len(s.expected_svg_style) assert s.got_svg_style == s.expected_svg_style def test_wrap_text(tmpdir): tmpdir = Path(tmpdir) wrapped", "velit esse cillum\" \"dolore eu fugiat nulla pariatur. 
Excepteur sint\" \"occaecat cupidatat non", "we are # just check whether it runs loc = Path(tmpdir, \"test.svg\") assert", "SVGStyleTester ipsum_text = ( \"<b>Lorem ipsum dolor</b> sit amet, <i>consectetur</i> adipiscing elit,\" \"sed", "it runs loc = Path(tmpdir, \"test.svg\") assert not loc.exists() MarkupText(ipsum_text, justify=True, filename=str(loc)) assert", "\" \"but the currently open element is “b”\", ), ( \"<xyz>foo</xyz>\", \"Unknown tag", "# -*- coding: utf-8 -*- from pathlib import Path import pytest import manimpango", "assert loc.exists() def test_markup_indent(tmpdir): # don't know how to verify this correctly #", "nulla pariatur. Excepteur sint\" \"occaecat cupidatat non proident, sunt in culpa qui\" \"officia", "import manimpango from . import CASES_DIR from ._manim import MarkupText from .svg_tester import", "filename=str(loc)) assert loc.exists() def test_markup_alignment(tmpdir): # don't know how to verify this correctly", "def test_bad_markup_error_message(text, error): assert manimpango.MarkupUtils.validate(text) == error def test_markup_text(tmpdir): loc = Path(tmpdir, \"test.svg\")", "loc.exists() def test_markup_indent(tmpdir): # don't know how to verify this correctly # it", "assert loc.exists() def test_markup_alignment(tmpdir): # don't know how to verify this correctly #", "\"but the currently open element is “b”\", ), ( \"<xyz>foo</xyz>\", \"Unknown tag 'xyz'", "test_good_markup(text): assert not manimpango.MarkupUtils.validate( text, ), f\"{text} should not fail validation\" @pytest.mark.parametrize(\"text\", [\"<b>foo\",", "currently open element is “b”\", ), ( \"<xyz>foo</xyz>\", \"Unknown tag 'xyz' on line", "in voluptate velit esse cillum\" \"dolore eu fugiat nulla pariatur. Excepteur sint\" \"occaecat", "def test_bad_markup(text): assert manimpango.MarkupUtils.validate( text ), f\"{text} should fail validation (unbalanced tags)\" @pytest.mark.parametrize(", "dolor\" \"in reprehenderit in voluptate velit esse cillum\" \"dolore eu fugiat nulla pariatur.", "CASES_DIR from ._manim import MarkupText from .svg_tester import SVGStyleTester ipsum_text = ( \"<b>Lorem", "diffent system so, we are # just check whether it runs loc =", "validation (unbalanced tags)\" @pytest.mark.parametrize( \"text,error\", [ ( \"<b>foo\", \"Error on line 1 char", "correctly # it varies upon diffent system so, we are # just check", "test_wrap_text(tmpdir): tmpdir = Path(tmpdir) wrapped = tmpdir / \"wrap.svg\" nowrap = tmpdir /", "\"wrap.svg\" nowrap = tmpdir / \"nowarap.svg\" MarkupText(ipsum_text, wrap_text=False, filename=str(nowrap)) MarkupText(ipsum_text, filename=str(wrapped)) assert wrapped.read_text()", "= Path(tmpdir) wrapped = tmpdir / \"wrap.svg\" nowrap = tmpdir / \"nowarap.svg\" MarkupText(ipsum_text,", "so, we are # just check whether it runs loc = Path(tmpdir, \"test.svg\")", "quis nostrud\" \"exercitation ullamco laboris nisi ut aliquip\" \"ex ea commodo consequat. Duis", "import MarkupText from .svg_tester import SVGStyleTester ipsum_text = ( \"<b>Lorem ipsum dolor</b> sit", "manimpango.MarkupUtils.validate( text, ), f\"{text} should not fail validation\" @pytest.mark.parametrize(\"text\", [\"<b>foo\", \"<xyz>foo</xyz>\"]) def test_bad_markup(text):", "voluptate velit esse cillum\" \"dolore eu fugiat nulla pariatur. 
Excepteur sint\" \"occaecat cupidatat", "loc = Path(tmpdir, \"test.svg\") assert not loc.exists() MarkupText( '<span underline=\"error\"><b><i>Hello Manim</i></b></span>', filename=str(loc) )", "[ ( \"<b>foo\", \"Error on line 1 char 23: Element “markup” was closed,", "amet, <i>consectetur</i> adipiscing elit,\" \"sed do eiusmod tempor incididunt ut labore et dolore\"", "import CASES_DIR from ._manim import MarkupText from .svg_tester import SVGStyleTester ipsum_text = (", "eu fugiat nulla pariatur. Excepteur sint\" \"occaecat cupidatat non proident, sunt in culpa", "Element “markup” was closed, \" \"but the currently open element is “b”\", ),", "indent=10, filename=str(loc)) assert loc.exists() def test_markup_alignment(tmpdir): # don't know how to verify this", "just check whether it runs loc = Path(tmpdir, \"test.svg\") assert not loc.exists() MarkupText(ipsum_text,", "= \"<span foreground='BLUE'>Hello</span>\\n<span foreground='GREEN'>World</span>\" MarkupText( text, filename=str(expected), ) s = SVGStyleTester(gotSVG=expected, expectedSVG=test_case) assert", "nostrud\" \"exercitation ullamco laboris nisi ut aliquip\" \"ex ea commodo consequat. Duis aute", "loc.exists() def test_markup_alignment(tmpdir): # don't know how to verify this correctly # it", "consequat. Duis aute irure dolor\" \"in reprehenderit in voluptate velit esse cillum\" \"dolore", "1 char 23: Element “markup” was closed, \" \"but the currently open element", "qui\" \"officia deserunt mollit anim id est laborum.\" ) @pytest.mark.parametrize(\"text\", [\"foo\", \"<b>bar</b>\", \"வணக்கம்\"])", "s = SVGStyleTester(gotSVG=expected, expectedSVG=test_case) assert len(s.got_svg_style) == len(s.expected_svg_style) assert s.got_svg_style == s.expected_svg_style def", "test_markup_justify(tmpdir): # don't know how to verify this correctly # it varies upon", "\"text,error\", [ ( \"<b>foo\", \"Error on line 1 char 23: Element “markup” was", "@pytest.mark.parametrize( \"text,error\", [ ( \"<b>foo\", \"Error on line 1 char 23: Element “markup”", "it runs loc = Path(tmpdir, \"test.svg\") assert not loc.exists() MarkupText(ipsum_text, indent=10, filename=str(loc)) assert", "s.got_svg_style == s.expected_svg_style def test_wrap_text(tmpdir): tmpdir = Path(tmpdir) wrapped = tmpdir / \"wrap.svg\"", "Manim</i></b></span>', filename=str(loc) ) assert loc.exists() def test_markup_justify(tmpdir): # don't know how to verify", "== error def test_markup_text(tmpdir): loc = Path(tmpdir, \"test.svg\") assert not loc.exists() MarkupText( '<span", "\"<xyz>foo</xyz>\"]) def test_bad_markup(text): assert manimpango.MarkupUtils.validate( text ), f\"{text} should fail validation (unbalanced tags)\"", "-*- from pathlib import Path import pytest import manimpango from . import CASES_DIR", "do eiusmod tempor incididunt ut labore et dolore\" \"magna aliqua. Ut enim <b>ad</b>", "are # just check whether it runs loc = Path(tmpdir, \"test.svg\") assert not", "filename=str(loc)) assert loc.exists() def test_markup_indent(tmpdir): # don't know how to verify this correctly", "loc.exists() MarkupText( '<span underline=\"error\"><b><i>Hello Manim</i></b></span>', filename=str(loc) ) assert loc.exists() def test_markup_justify(tmpdir): # don't", "just check whether it runs loc = Path(tmpdir, \"test.svg\") assert not loc.exists() MarkupText(", "\"Error on line 1 char 23: Element “markup” was closed, \" \"but the", "\"<b>foo\", \"Error on line 1 char 23: Element “markup” was closed, \" \"but", "ut labore et dolore\" \"magna aliqua. 
Ut enim <b>ad</b> minim veniam, quis nostrud\"", "enim <b>ad</b> minim veniam, quis nostrud\" \"exercitation ullamco laboris nisi ut aliquip\" \"ex", "line 1 char 23: Element “markup” was closed, \" \"but the currently open", "= Path(tmpdir, \"test.svg\") assert not loc.exists() MarkupText(ipsum_text, indent=10, filename=str(loc)) assert loc.exists() def test_markup_alignment(tmpdir):", "commodo consequat. Duis aute irure dolor\" \"in reprehenderit in voluptate velit esse cillum\"", "nisi ut aliquip\" \"ex ea commodo consequat. Duis aute irure dolor\" \"in reprehenderit", "import Path import pytest import manimpango from . import CASES_DIR from ._manim import", "MarkupText from .svg_tester import SVGStyleTester ipsum_text = ( \"<b>Lorem ipsum dolor</b> sit amet,", "on line 1 char 14\", ), ], ) def test_bad_markup_error_message(text, error): assert manimpango.MarkupUtils.validate(text)", "fugiat nulla pariatur. Excepteur sint\" \"occaecat cupidatat non proident, sunt in culpa qui\"", "\"expected.svg\" text = \"<span foreground='BLUE'>Hello</span>\\n<span foreground='GREEN'>World</span>\" MarkupText( text, filename=str(expected), ) s = SVGStyleTester(gotSVG=expected,", "sint\" \"occaecat cupidatat non proident, sunt in culpa qui\" \"officia deserunt mollit anim", "def test_markup_indent(tmpdir): # don't know how to verify this correctly # it varies", "\"test.svg\") assert not loc.exists() MarkupText(ipsum_text, indent=10, filename=str(loc)) assert loc.exists() def test_markup_alignment(tmpdir): # don't", "\"<b>bar</b>\", \"வணக்கம்\"]) def test_good_markup(text): assert not manimpango.MarkupUtils.validate( text, ), f\"{text} should not fail", "fail validation\" @pytest.mark.parametrize(\"text\", [\"<b>foo\", \"<xyz>foo</xyz>\"]) def test_bad_markup(text): assert manimpango.MarkupUtils.validate( text ), f\"{text} should", "tmpdir / \"expected.svg\" text = \"<span foreground='BLUE'>Hello</span>\\n<span foreground='GREEN'>World</span>\" MarkupText( text, filename=str(expected), ) s", "not loc.exists() MarkupText(ipsum_text, justify=True, filename=str(loc)) assert loc.exists() def test_markup_indent(tmpdir): # don't know how", "[\"<b>foo\", \"<xyz>foo</xyz>\"]) def test_bad_markup(text): assert manimpango.MarkupUtils.validate( text ), f\"{text} should fail validation (unbalanced", ") assert loc.exists() def test_markup_justify(tmpdir): # don't know how to verify this correctly", "( \"<b>foo\", \"Error on line 1 char 23: Element “markup” was closed, \"" ]
[]
[ "\"\"\" self._module.close() super().close_server() class Client(ClientBase): def measure_voltage(self, board, channel): return self._service.exposed_measure_voltage(board, channel) def", "channel, voltage): return self._service.exposed_set_low_voltage(board, channel, voltage) def get_high_voltage(self, board, channel): return self._service.exposed_get_high_voltage(board, channel)", "set_high_voltage(self, board, channel, voltage): return self._service.exposed_set_high_voltage(board, channel, voltage) def set_low_voltage(self, board, channel, voltage):", "self._module.get_high_voltage(board, channel) def exposed_get_low_voltage(self, board, channel): return self._module.get_low_voltage(board, channel) def exposed_save(self): return self._module.save()", "channel): return self._module.measure_voltage(board, channel) def exposed_set_high_voltage(self, board, channel, voltage): return self._module.set_high_voltage(board, channel, voltage)", "return self._module.set_low_voltage(board, channel, voltage) def exposed_get_high_voltage(self, board, channel): return self._module.get_high_voltage(board, channel) def exposed_get_low_voltage(self,", "exposed_measure_voltage(self, board, channel): return self._module.measure_voltage(board, channel) def exposed_set_high_voltage(self, board, channel, voltage): return self._module.set_high_voltage(board,", "self._service.exposed_get_low_voltage(board, channel) def save(self): return self._service.exposed_save() def override(self, board, channel, state=True): return self._service.exposed_override(board,", "self._module.get_low_voltage(board, channel) def exposed_save(self): return self._module.save() def exposed_override(self, board, channel, state=True): return self._module.override(board,", "super().close_server() class Client(ClientBase): def measure_voltage(self, board, channel): return self._service.exposed_measure_voltage(board, channel) def set_high_voltage(self, board,", "exposed_save(self): return self._module.save() def exposed_override(self, board, channel, state=True): return self._module.override(board, channel, state) def", "which the service is running Overwrites parent class method \"\"\" self._module.close() super().close_server() class", "method \"\"\" self._module.close() super().close_server() class Client(ClientBase): def measure_voltage(self, board, channel): return self._service.exposed_measure_voltage(board, channel)", "channel, state=True): return self._service.exposed_override(board, channel, state) def disable_override(self, board, channel): return self._service.exposed_disable_override(board, channel)", "return self._module.measure_voltage(board, channel) def exposed_set_high_voltage(self, board, channel, voltage): return self._module.set_high_voltage(board, channel, voltage) def", "self._module.save() def exposed_override(self, board, channel, state=True): return self._module.override(board, channel, state) def exposed_disable_override(self, board,", "def set_low_voltage(self, board, channel, voltage): return self._service.exposed_set_low_voltage(board, channel, voltage) def get_high_voltage(self, board, channel):", "channel) def exposed_save(self): return self._module.save() def exposed_override(self, board, channel, state=True): return self._module.override(board, channel,", "channel, voltage) def exposed_get_high_voltage(self, board, channel): return self._module.get_high_voltage(board, channel) def exposed_get_low_voltage(self, board, channel):", "board, channel): return self._module.measure_voltage(board, channel) def 
<gh_stars>1-10
from pylabnet.network.core.service_base import ServiceBase
from pylabnet.network.core.client_base import ClientBase


class Service(ServiceBase):

    def exposed_measure_voltage(self, board, channel):
        return self._module.measure_voltage(board, channel)

    def exposed_set_high_voltage(self, board, channel, voltage):
        return self._module.set_high_voltage(board, channel, voltage)

    def exposed_set_low_voltage(self, board, channel, voltage):
        return self._module.set_low_voltage(board, channel, voltage)

    def exposed_get_high_voltage(self, board, channel):
        return self._module.get_high_voltage(board, channel)

    def exposed_get_low_voltage(self, board, channel):
        return self._module.get_low_voltage(board, channel)

    def exposed_save(self):
        return self._module.save()

    def exposed_override(self, board, channel, state=True):
        return self._module.override(board, channel, state)

    def exposed_disable_override(self, board, channel):
        return self._module.disable_override(board, channel)

    def close_server(self):
        """ Closes the server for which the service is running

        Overwrites parent class method
        """
        self._module.close()
        super().close_server()


class Client(ClientBase):

    def measure_voltage(self, board, channel):
        return self._service.exposed_measure_voltage(board, channel)

    def set_high_voltage(self, board, channel, voltage):
        return self._service.exposed_set_high_voltage(board, channel, voltage)

    def set_low_voltage(self, board, channel, voltage):
        return self._service.exposed_set_low_voltage(board, channel, voltage)

    def get_high_voltage(self, board, channel):
        return self._service.exposed_get_high_voltage(board, channel)

    def get_low_voltage(self, board, channel):
        return self._service.exposed_get_low_voltage(board, channel)

    def save(self):
        return self._service.exposed_save()

    def override(self, board, channel, state=True):
        return self._service.exposed_override(board, channel, state)

    def disable_override(self, board, channel):
        return self._service.exposed_disable_override(board, channel)
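# Usage sketch (added for clarity, not part of the original module). It assumes
# the usual pylabnet wiring: the Service wraps a hardware driver via
# assign_module() and is hosted by a GenericServer, while the Client connects
# by host/port. The driver object `hv_driver`, the port number, and the exact
# constructor arguments are assumptions, not taken from this file:
#
#     from pylabnet.network.core.generic_server import GenericServer
#
#     hv_service = Service()
#     hv_service.assign_module(module=hv_driver)   # hv_driver: hypothetical HV driver
#     server = GenericServer(service=hv_service, host='localhost', port=17972)
#     server.start()
#
#     hv_client = Client(host='localhost', port=17972)
#     hv_client.set_high_voltage(board=0, channel=1, voltage=5.0)
#     print(hv_client.get_high_voltage(board=0, channel=1))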
"""
API do Gerenciador de Casas de Aluguel
(rental-house manager API)
======================================
"""
# https://www.pythoncentral.io/introduction-to-sqlite-in-python/
import sqlite3

import config


def make_connection():
    return sqlite3.connect(config.DATABASE_URL)


class InquilinoException(Exception):
    ...


class CasaException(Exception):
    ...


class DAO:
    def __init__(self, conn):
        self.conn = conn


class Casa_DAO(DAO):

    def adiciona_casa(self, nome=None, valor_aluguel=None, agua=None, instalacao_eletrica=None,
                      commit=False, rollback=False):
        if nome is None:
            raise Exception("Necessário prover nome.")
        if valor_aluguel is None:
            raise Exception("Necessário prover um valor para o aluguel.")
        try:
            cursor = self.conn.cursor()
            cursor.execute("""
                INSERT INTO casa(nome_casa, valor_aluguel_casa, agua_casa, num_instalacao)
                VALUES (?, ?, ?, ?)
            """, (nome, valor_aluguel, agua, instalacao_eletrica))
            if commit:
                self.conn.commit()
            return {'id_casa': cursor.lastrowid, 'nome_casa': nome, 'valor_aluguel': valor_aluguel,
                    'agua_casa': agua, 'num_instalacao_eletrica': instalacao_eletrica}
        except sqlite3.Error:
            if rollback:
                self.conn.rollback()
            return None

    def todas_casas(self, vazias=False):
        cursor = self.conn.cursor()
        if vazias:
            # Only houses that currently have no active contract
            cursor.execute("""
                SELECT c.id_casa, nome_casa, valor_aluguel_casa, agua_casa, i.num_instalacao, cpf_titular
                FROM casa c
                LEFT JOIN instalacao_eletrica i ON c.num_instalacao = i.num_instalacao
                WHERE c.id_casa NOT IN (
                    SELECT casa.id_casa FROM casa
                    JOIN contrato ON contrato.id_casa = casa.id_casa
                    WHERE ativo
                )
                GROUP BY c.id_casa;
            """)
        else:
            cursor.execute("""
                SELECT c.id_casa, nome_casa, valor_aluguel_casa, agua_casa, i.num_instalacao, cpf_titular
                FROM casa c
                LEFT JOIN instalacao_eletrica i ON c.num_instalacao = i.num_instalacao;
            """)
        casas = cursor.fetchall()
        return [{'id_casa': x[0], 'nome_casa': x[1], 'valor_aluguel': x[2], 'agua_casa': x[3],
                 'num_instalacao_eletrica': x[4], 'cpf': x[5]} for x in casas]

    def altera_casa(self, id=None, commit=False, rollback=False, **kwargs):
        if id is None:
            raise Exception("Necessário prover um ID")
        if not len(kwargs):
            raise Exception("Necessário prover novas informações para a Casa")
        query = f'''UPDATE casa SET {', '.join([f"{key}{'_casa' if key != 'num_instalacao' else ''} = ?" for key in kwargs.keys()])} WHERE id_casa = ?'''
        try:
            cursor = self.conn.cursor()
            cursor.execute(query, tuple(kwargs.values()) + (id,))
            if commit:
                self.conn.commit()
        except sqlite3.Error:
            if rollback:
                self.conn.rollback()


class Instalacao_Eletrica_DAO(DAO):

    def adiciona_instalacao_eletrica(self, num_instalacao=None, cpf=None, commit=False, rollback=False):
        if num_instalacao is None:
            raise Exception("Necessário prover um número de instalação")
        if cpf is None:
            raise Exception("Necessário prover um número de CPF")
        try:
            cursor = self.conn.cursor()
            cursor.execute("""
                INSERT INTO instalacao_eletrica VALUES (?, ?)
            """, (num_instalacao, cpf))
            if commit:
                self.conn.commit()
            return {'num_instalacao': num_instalacao, 'cpf_titular': cpf}
        except sqlite3.Error:
            if rollback:
                self.conn.rollback()
            return None

    def altera_instalacao(self, num_instalacao, cpf, commit=False, rollback=False):
        query = '''UPDATE instalacao_eletrica SET cpf_titular = ? WHERE num_instalacao = ?'''
        try:
            cursor = self.conn.cursor()
            cursor.execute(query, (cpf, num_instalacao))
            if commit:
                self.conn.commit()
        except sqlite3.Error:
            if rollback:
                self.conn.rollback()

    def todas_instalacoes(self):
        cursor = self.conn.cursor()
        cursor.execute("""
            SELECT * FROM instalacao_eletrica;
        """)
        instalacoes = cursor.fetchall()
        return [{'num_instalacao': x[0], 'cpf_titular': x[1]} for x in instalacoes]


class Inquilino_DAO(DAO):

    def adiciona_inquilino(self, cpf=None, nome=None, rg=None, commit=False, rollback=False):
        if cpf is None:
            raise Exception("Necessário prover um número de CPF")
        if nome is None:
            raise Exception("Necessário prover um Nome")
        if rg is None:
            raise Exception("Necessário prover um RG")
        try:
            cursor = self.conn.cursor()
            cursor.execute("""
                INSERT INTO inquilino(cpf_inq, nome_inq, rg_inq) VALUES (?, ?, ?)
            """, (cpf, nome, rg))
            if commit:
                self.conn.commit()
            return {'id_inq': cursor.lastrowid, 'cpf_inq': cpf, 'nome_inq': nome, 'rg_inq': rg}
        except sqlite3.Error:
            if rollback:
                self.conn.rollback()
            return None

    def todos_inquilinos(self, ativos=False, inativos=False):
        cursor = self.conn.cursor()
        if ativos and inativos:
            raise Exception("Conflito")
        elif ativos:
            cursor.execute("""
                SELECT * FROM inquilino
                WHERE id_inq IN (SELECT DISTINCT id_inq FROM contrato WHERE ativo);
            """)
        elif inativos:
            cursor.execute("""
                SELECT * FROM inquilino
                WHERE id_inq NOT IN (SELECT DISTINCT id_inq FROM contrato WHERE ativo);
            """)
        else:
            cursor.execute("""
                SELECT * FROM inquilino;
            """)
        inquilinos = cursor.fetchall()
        return [{'id_inq': x[0], 'cpf_inq': x[1], 'nome_inq': x[2], 'rg_inq': x[3]}
                for x in inquilinos]

    def altera_inquilino(self, id=None, commit=False, rollback=False, **kwargs):
        if id is None:
            raise Exception("Necessário prover um ID")
        if not len(kwargs):
            raise Exception("Necessário prover novas informações para o Inquilino")
        query = f'''UPDATE inquilino SET {', '.join([f'{key}_inq = ?' for key in kwargs.keys()])} WHERE id_inq = ?'''
        try:
            cursor = self.conn.cursor()
            cursor.execute(query, tuple(kwargs.values()) + (id,))
            if commit:
                self.conn.commit()
        except sqlite3.Error:
            if rollback:
                self.conn.rollback()


class Contrato_DAO(DAO):

    def adiciona_contrato(self, valor=None, ativo=True, dia_vencimento=None, fim_contrato=None,
                          casa=None, inq=None, commit=False, rollback=False):
        if valor is None:
            raise Exception("Necessário prover um valor de aluguel para o contrato")
        if dia_vencimento is None:
            raise Exception("Necessário prover uma data de vencimento")
        if casa is None:
            raise Exception("Necessário escolher uma casa")
        if inq is None:
            raise Exception("Necessário escolher um inquilino")
        try:
            cursor = self.conn.cursor()
            self._valida(inq, casa)
            cursor.execute("""
                INSERT INTO contrato(valor, ativo, dt_fim_contrato, dia_venc_aluguel, id_casa, id_inq)
                VALUES (?, ?, ?, ?, ?, ?)
            """, (valor, ativo, fim_contrato, dia_vencimento, casa, inq))
            if commit:
                self.conn.commit()
            return {'id_contrato': cursor.lastrowid, 'valor': valor, 'ativo': ativo,
                    'dt_fim_contrato': fim_contrato, 'dia_venc_aluguel': dia_vencimento,
                    'id_casa': casa, 'id_inq': inq}
        except sqlite3.Error:
            if rollback:
                self.conn.rollback()
            return None

    def _valida(self, id_inq=None, id_casa=None):
        # Rejects a tenant or house that already appears in an active contract
        c = Contrato_DAO(make_connection())
        if id_inq and id_inq in [x['id_inq'] for x in c.todos_contratos() if x['ativo']]:
            raise InquilinoException()
        if id_casa and id_casa in [x['id_casa'] for x in c.todos_contratos() if x['ativo']]:
            raise CasaException()

    def todos_contratos(self):
        cursor = self.conn.cursor()
        cursor.execute("""
            SELECT * FROM contrato;
        """)
        contratos = cursor.fetchall()
        return [{'id_contrato': x[0], 'valor': x[1], 'ativo': x[2], 'dt_fim_contrato': x[3],
                 'dia_venc_aluguel': x[4], 'id_casa': x[5], 'id_inq': x[6]} for x in contratos]

    def altera_valor_contrato(self, id=None, valor=None, commit=False, rollback=False):
        if id is None:
            raise Exception("Necessário prover um ID")
        if valor is None:
            raise Exception("Necessário prover um valor")
        query = '''UPDATE contrato SET valor = ? WHERE id_contrato = ?'''
        try:
            cursor = self.conn.cursor()
            cursor.execute(query, (valor, id))
            if commit:
                self.conn.commit()
        except sqlite3.Error:
            if rollback:
                self.conn.rollback()

    def inativa_contrato(self, id=None, commit=False, rollback=False):
        if id is None:
            raise Exception("Necessário prover um ID")
        query = '''UPDATE contrato SET ativo = 0 WHERE id_contrato = ?'''
        try:
            cursor = self.conn.cursor()
            cursor.execute(query, (id,))
            if commit:
                self.conn.commit()
        except sqlite3.Error:
            if rollback:
                self.conn.rollback()

    def ativa_contrato(self, id=None, commit=False, rollback=False):
        if id is None:
            raise Exception("Necessário prover um ID")
        C = self.get_contrato(id)
        self._valida(C['id_inq'], C['id_casa'])
        query = '''UPDATE contrato SET ativo = 1 WHERE id_contrato = ?'''
        try:
            cursor = self.conn.cursor()
            cursor.execute(query, (id,))
            if commit:
                self.conn.commit()
        except sqlite3.Error:
            if rollback:
                self.conn.rollback()

    def get_contrato(self, id):
        cursor = self.conn.cursor()
        cursor.execute("""
            SELECT * FROM contrato WHERE id_contrato = ?;
        """, (id,))
        contratos = cursor.fetchall()
        return [{'id_contrato': x[0], 'valor': x[1], 'ativo': x[2], 'dt_fim_contrato': x[3],
                 'dia_venc_aluguel': x[4], 'id_casa': x[5], 'id_inq': x[6]} for x in contratos][0]


class PagamentoDAO(DAO):

    def realiza_pagamento(self, id_contrato=None, dt_pag=None, dt_venc=None, deposito=False,
                          commit=False, rollback=False):
        if id_contrato is None:
            raise Exception("Necessário prover um contrato")
        if dt_venc is None:
            raise Exception("Necessário prover uma data de vencimento")
        if dt_pag is None:
            raise Exception("Necessário prover uma data de pagamento")
        try:
            cursor = self.conn.cursor()
            cursor.execute("""
                INSERT INTO pagamento(dt_venc, dt_pag, deposito, id_contrato) VALUES (?, ?, ?, ?)
            """, (dt_venc, dt_pag, deposito, id_contrato))
            if commit:
                self.conn.commit()
            return {'id_pag': cursor.lastrowid, 'dt_venc': dt_venc, 'dt_pag': dt_pag,
                    'deposito': deposito, 'id_contrato': id_contrato}
        except sqlite3.Error:
            if rollback:
                self.conn.rollback()
            return None

    def todos_pagamentos(self):
        cursor = self.conn.cursor()
        cursor.execute("""
            SELECT * FROM pagamento;
        """)
        pagamentos = cursor.fetchall()
        return [{'id_pag': x[0], 'dt_venc': x[1], 'dt_pag': x[2], 'deposito': x[3],
                 'id_contrato': x[4]} for x in pagamentos]

    def todos_pagamentos_contrato(self, id_contrato):
        cursor = self.conn.cursor()
        cursor.execute("""
            SELECT * FROM pagamento WHERE pagamento.id_contrato = ?;
        """, (id_contrato,))
        pagamentos = cursor.fetchall()
        return [{'id_pag': x[0], 'dt_venc': x[1], 'dt_pag': x[2], 'deposito': x[3],
                 'id_contrato': x[4]} for x in pagamentos]


def start_db(conn):
    # Creates the schema used by the DAOs above
    cursor = conn.cursor()
    cursor.executescript("""
        CREATE TABLE IF NOT EXISTS instalacao_eletrica (
            num_instalacao VARCHAR(20) NOT NULL PRIMARY KEY,
            cpf_titular VARCHAR(11) NOT NULL UNIQUE
        );
        CREATE TABLE IF NOT EXISTS casa(
            id_casa INTEGER NOT NULL PRIMARY KEY,
            nome_casa INTEGER NOT NULL,
            valor_aluguel_casa INTEGER NOT NULL,
            agua_casa VARCHAR(10),
            num_instalacao VARCHAR(11) UNIQUE,
            FOREIGN KEY (num_instalacao) REFERENCES instalacao_eletrica(num_instalacao)
        );
        CREATE TABLE IF NOT EXISTS inquilino(
            id_inq INTEGER NOT NULL PRIMARY KEY,
            cpf_inq VARCHAR(11) NOT NULL UNIQUE,
            nome_inq VARCHAR(40) NOT NULL,
            rg_inq VARCHAR(10) NOT NULL
        );
        CREATE TABLE IF NOT EXISTS contrato(
            id_contrato INTEGER NOT NULL PRIMARY KEY,
            valor REAL NOT NULL,
            ativo INTEGER NOT NULL,
            dt_fim_contrato DATE NOT NULL,
            dia_venc_aluguel INTEGER NOT NULL,
            id_casa INTEGER NOT NULL,
            id_inq INTEGER NOT NULL
        );
        CREATE TABLE IF NOT EXISTS pagamento(
            id_pag INTEGER NOT NULL PRIMARY KEY,
            dt_venc VARCHAR(23) NOT NULL,
            dt_pag VARCHAR(23),
            deposito INTEGER NOT NULL,
            id_contrato INTEGER,
            FOREIGN KEY (id_contrato) REFERENCES contrato(id_contrato)
        );
    """)
for key in kwargs.keys()])} WHERE id_inq =", "for key in kwargs.keys()])} WHERE id_casa = ?''' # return None try: cursor", "instalacao_eletrica(num_instalacao) ); CREATE TABLE IF NOT EXISTS inquilino( id_inq INTEGER NOT NULL PRIMARY", "?; \"\"\", tuple([id])) contratos = cursor.fetchall() return [{ 'id_contrato': x[0], 'valor': x[1], 'ativo':", "[x['id_casa'] for x in c.todos_contratos() if x['ativo']]: raise CasaException() def todos_contratos(self): cursor =", "SET ativo = 0 WHERE id_contrato = ?''' try: cursor = self.conn.cursor() cursor.execute(query,", "return [{ 'num_instalacao': x[0], 'cpf_titular': x[1] } for x in instalacoes] class Inquilino_DAO(DAO):", "rollback: self.conn.rollback() def todas_instalacoes(self): cursor = self.conn.cursor() cursor.execute(\"\"\" SELECT * FROM instalacao_eletrica; \"\"\")", "ativo=True, dia_vencimento=None, fim_contrato=None, casa=None, inq=None, commit=False, rollback=False): if valor is None: raise Exception(\"Necessário", "sqlite3.Error as e: if rollback: self.conn.rollback() return None def todos_pagamentos(self): cursor = self.conn.cursor()", "self.conn.commit() return { 'id_pag': cursor.lastrowid , 'dt_venc': dt_venc , 'dt_pag': dt_pag , 'deposito':", "self.conn.cursor() cursor.execute(\"\"\" SELECT * FROM instalacao_eletrica; \"\"\") instalcoes = cursor.fetchall() return [{ 'num_instalacao':", "'nome_inq': nome, 'rg_inq': rg } except sqlite3.Error as e: if rollback: self.conn.rollback() return", "self.conn.cursor() cursor.execute(query, (valor, id)) if commit: self.conn.commit() except sqlite3.Error as e: if rollback:", "prover um ID\") if not len(kwargs): raise Exception(\"Necessário prover novas informações para o", "__init__(self, conn): self.conn = conn class Casa_DAO(DAO): def adiciona_casa(self, nome=None, valor_aluguel=None, agua=None, instalacao_eletrica=None,", "} except sqlite3.Error as e: if rollback: self.conn.rollback() return None def todos_pagamentos(self): cursor", "todos_pagamentos(self): cursor = self.conn.cursor() cursor.execute(\"\"\" SELECT * FROM pagamento; \"\"\") pagamentos = cursor.fetchall()", "nome is None: raise Exception(\"Necessário prover um Nome\") if rg is None: raise", "} for x in contratos] def altera_valor_contrato(self, id=None, valor=None, commit=False, rollback=False): if id", "PRIMARY KEY, valor REAL NOT NULL, ativo INTEGER NOT NULL, dt_fim_contrato DATE NOT", "from inquilino; \"\"\") inquilinos = cursor.fetchall() return [{ 'id_inq': x[0], 'cpf_inq': x[1], 'nome_inq':", "agua_casa, num_instalacao) VALUES (?,?,?,?) \"\"\", (nome, valor_aluguel, agua, instalacao_eletrica)) if commit: self.conn.commit() return", "in contratos] def altera_valor_contrato(self, id=None, valor=None, commit=False, rollback=False): if id is None: raise", "rollback=False): if id_contrato is None: raise Exception(\"Necessário prover um contrato\") if dt_venc is", "if id is None: raise Exception(\"Necessário prover um ID\") query = '''UPDATE contrato", "cursor.lastrowid, 'valor': valor, 'ativo': ativo, 'dt_fim_contrato': fim_contrato, 'dia_venc_aluguel': dia_vencimento, 'id_casa': casa, 'id_inq': inq", "?; \"\"\", (id_contrato)) pagamentos = cursor.fetchall() return [{ 'id_pag': x[0] , 'dt_venc': x[1]", "INTEGER NOT NULL, valor_aluguel_casa INTEGER NOT NULL, agua_casa VARCHAR(10), num_instalacao VARCHAR(11) UNIQUE, FOREIGN", "num_instalacao, cpf, commit=False, rollback=False): query = f'''UPDATE instalacao_eletrica SET cpf_titular = ? 
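
# --- Usage sketch (editor's addition, not part of the original API) ----------
# A minimal, hedged example of how Casa_DAO might be driven. It assumes an
# in-memory database for the demo and that start_db() (defined at the end of
# this module) has created the schema; the sample values are illustrative only.
def _exemplo_casa_dao():
    conn = sqlite3.connect(':memory:')   # assumption: throwaway in-memory DB
    start_db(conn)                       # create the schema defined below
    casas = Casa_DAO(conn)

    nova = casas.adiciona_casa(nome='Casa Azul', valor_aluguel=1200,
                               agua='hidrometro', commit=True)
    print(nova)                          # dict including the generated id_casa

    # kwargs {'valor_aluguel': 1300} become "UPDATE casa SET valor_aluguel_casa = ? ..."
    casas.altera_casa(id=nova['id_casa'], valor_aluguel=1300, commit=True)
    print(casas.todas_casas(vazias=True))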

class Instalacao_Eletrica_DAO(DAO):
    # Data-access object for the `instalacao_eletrica` (power-supply account) table.

    def adiciona_instalacao_eletrica(self, num_instalacao=None, cpf=None, commit=False, rollback=False):
        if num_instalacao is None:
            raise Exception("Necessário prover um número de instalação")
        if cpf is None:
            raise Exception("Necessário prover um número de CPF")
        try:
            cursor = self.conn.cursor()
            cursor.execute("""
                INSERT INTO instalacao_eletrica VALUES (?, ?)
            """, (num_instalacao, cpf))
            if commit:
                self.conn.commit()
            return {
                'num_instalacao': num_instalacao,
                'cpf_titular': cpf
            }
        except sqlite3.Error as e:
            if rollback:
                self.conn.rollback()
            return None

    def altera_instalacao(self, num_instalacao, cpf, commit=False, rollback=False):
        query = '''UPDATE instalacao_eletrica SET cpf_titular = ?
                   WHERE num_instalacao = ?'''
        try:
            cursor = self.conn.cursor()
            cursor.execute(query, (cpf, num_instalacao))
            if commit:
                self.conn.commit()
        except sqlite3.Error as e:
            if rollback:
                self.conn.rollback()

    def todas_instalacoes(self):
        cursor = self.conn.cursor()
        cursor.execute("""
            SELECT * FROM instalacao_eletrica;
        """)
        instalacoes = cursor.fetchall()
        return [{
            'num_instalacao': x[0],
            'cpf_titular': x[1]
        } for x in instalacoes]


class Inquilino_DAO(DAO):
    # Data-access object for the `inquilino` (tenant) table.

    def adiciona_inquilino(self, cpf=None, nome=None, rg=None, commit=False, rollback=False):
        if cpf is None:
            raise Exception("Necessário prover um número de CPF")
        if nome is None:
            raise Exception("Necessário prover um Nome")
        if rg is None:
            raise Exception("Necessário prover um RG")
        try:
            cursor = self.conn.cursor()
            cursor.execute("""
                INSERT INTO inquilino(cpf_inq, nome_inq, rg_inq) VALUES (?, ?, ?)
            """, (cpf, nome, rg))
            if commit:
                self.conn.commit()
            return {
                'id_inq': cursor.lastrowid,
                'cpf_inq': cpf,
                'nome_inq': nome,
                'rg_inq': rg
            }
        except sqlite3.Error as e:
            if rollback:
                self.conn.rollback()
            return None

    def todos_inquilinos(self, ativos=False, inativos=False):
        cursor = self.conn.cursor()
        if ativos and inativos:
            raise Exception("Conflito")
        elif ativos:
            cursor.execute("""
                SELECT * FROM inquilino
                WHERE id_inq IN (SELECT DISTINCT id_inq FROM contrato WHERE ativo);
            """)
        elif inativos:
            cursor.execute("""
                SELECT * FROM inquilino
                WHERE id_inq NOT IN (SELECT DISTINCT id_inq FROM contrato WHERE ativo);
            """)
        else:
            cursor.execute("""
                SELECT * FROM inquilino;
            """)
        inquilinos = cursor.fetchall()
        return [{
            'id_inq': x[0],
            'cpf_inq': x[1],
            'nome_inq': x[2],
            'rg_inq': x[3]
        } for x in inquilinos]

    def altera_inquilino(self, id=None, commit=False, rollback=False, **kwargs):
        if id is None:
            raise Exception("Necessário prover um ID")
        if not len(kwargs):
            raise Exception("Necessário prover novas informações para o Inquilino")
        query = f'''UPDATE inquilino SET {', '.join([f'{key}_inq = ?' for key in kwargs.keys()])} WHERE id_inq = ?'''
        try:
            cursor = self.conn.cursor()
            cursor.execute(query, tuple((kwargs[k] for k in kwargs.keys())) + tuple([id]))
            if commit:
                self.conn.commit()
        except sqlite3.Error as e:
            if rollback:
                self.conn.rollback()
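
# --- Usage sketch (editor's addition) -----------------------------------------
# Hedged illustration of the dynamic UPDATE built by altera_inquilino(): every
# keyword argument becomes a `<campo>_inq = ?` assignment. Assumes an in-memory
# database created with start_db() below; the CPF/RG values are placeholders.
def _exemplo_inquilino_dao():
    conn = sqlite3.connect(':memory:')
    start_db(conn)
    inquilinos = Inquilino_DAO(conn)

    novo = inquilinos.adiciona_inquilino(cpf='12345678901', nome='Maria',
                                         rg='1234567890', commit=True)
    # kwargs {'nome': 'Maria Silva'} produce:
    #   UPDATE inquilino SET nome_inq = ? WHERE id_inq = ?
    inquilinos.altera_inquilino(id=novo['id_inq'], nome='Maria Silva', commit=True)
    print(inquilinos.todos_inquilinos())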
\"\"\", (num_instalacao, cpf)) if commit: self.conn.commit() return {", "in kwargs.keys()])} WHERE id_inq = ?''' try: cursor = self.conn.cursor() cursor.execute(query, tuple((kwargs[k] for", "as e: if rollback: self.conn.rollback() return None def _valida(self, id_inq=None, id_casa=None): c =", "rg } except sqlite3.Error as e: if rollback: self.conn.rollback() return None def todos_inquilinos(self,", "} for x in pagamentos] def todos_pagamentos_contrato(self, id_contrato): cursor = self.conn.cursor() cursor.execute(\"\"\" SELECT", "SELECT * FROM pagamento WHERE pagamento.id_contrato = ?; \"\"\", (id_contrato)) pagamentos = cursor.fetchall()", "len(kwargs): raise Exception(\"Necessário prover novas informações para o Inquilino\") query = f'''UPDATE inquilino", "num_instalacao)) if commit: self.conn.commit() except sqlite3.Error as e: if rollback: self.conn.rollback() def todas_instalacoes(self):", "instalacao_eletrica ( num_instalacao VARCHAR(20) NOT NULL PRIMARY KEY, cpf_titular VARCHAR(11) NOT NULL UNIQUE", "if x['ativo']]: raise InquilinoException() if id_casa and id_casa in [x['id_casa'] for x in", "deposito, id_contrato) VALUES (?, ?, ?, ?) \"\"\", (dt_venc, dt_pag, deposito, id_contrato)) if", "(id_contrato)) pagamentos = cursor.fetchall() return [{ 'id_pag': x[0] , 'dt_venc': x[1] , 'dt_pag':", "valor=None, commit=False, rollback=False): if id is None: raise Exception(\"Necessário prover um ID\") if", "def _valida(self, id_inq=None, id_casa=None): c = Contrato_DAO(make_connection()) if id_inq and id_inq in [x['id_inq']", "cpf, commit=False, rollback=False): query = f'''UPDATE instalacao_eletrica SET cpf_titular = ? WHERE num_instalacao", "= '''UPDATE contrato SET ativo = 1 WHERE id_contrato = ?''' try: cursor", "'dt_venc': dt_venc , 'dt_pag': dt_pag , 'deposito': deposito , 'id_contrato': id_contrato } except", "prover novas informações para o Inquilino\") query = f'''UPDATE inquilino SET {', '.join([f'{key}_inq", "VALUES (?, ?) \"\"\", (num_instalacao, cpf)) if commit: self.conn.commit() return { 'num_instalacao': num_instalacao,", "cpf_inq VARCHAR(11) NOT NULL UNIQUE, nome_inq VARCHAR(40) NOT NULL, rg_inq VARCHAR(10) NOT NULL", "None try: cursor = self.conn.cursor() cursor.execute(query, (cpf, num_instalacao)) if commit: self.conn.commit() except sqlite3.Error", "contrato SET ativo = 0 WHERE id_contrato = ?''' try: cursor = self.conn.cursor()", "cpf_titular VARCHAR(11) NOT NULL UNIQUE ); CREATE TABLE IF NOT EXISTS casa( id_casa", "Exception(\"Necessário prover uma data de vencimento\") if casa is None: raise Exception(\"Necessário escolher", "cursor = self.conn.cursor() cursor.execute(\"\"\" INSERT INTO casa(nome_casa, valor_aluguel_casa, agua_casa, num_instalacao) VALUES (?,?,?,?) 
\"\"\",", "c.id_casa NOT IN ( SELECT casa.id_casa from casa JOIN contrato ON contrato.id_casa= casa.id_casa", "raise Exception(\"Conflito\") elif ativos: cursor.execute(\"\"\" select * from inquilino where id_inq in (select", "commit=False, rollback=False): if valor is None: raise Exception(\"Necessário prover um valor de aluguel", "cursor = self.conn.cursor() cursor.execute(query, (id, )) if commit: self.conn.commit() except sqlite3.Error as e:", "INTEGER NOT NULL, agua_casa VARCHAR(10), num_instalacao VARCHAR(11) UNIQUE, FOREIGN KEY (num_instalacao) REFERENCES instalacao_eletrica(num_instalacao)", "cursor.execute(query, (id, )) if commit: self.conn.commit() except sqlite3.Error as e: if rollback: self.conn.rollback()", "contrato where ativo); \"\"\") elif inativos: cursor.execute(\"\"\" select * from inquilino where id_inq", "PRIMARY KEY, cpf_inq VARCHAR(11) NOT NULL UNIQUE, nome_inq VARCHAR(40) NOT NULL, rg_inq VARCHAR(10)", "'id_pag': x[0] , 'dt_venc': x[1] , 'dt_pag': x[2] , 'deposito': x[3] , 'id_contrato':", "x[3], 'num_instalacao_eletrica': x[4], 'cpf': x[5] } for x in casas] def altera_casa(self, id=None,", "inq=None, commit=False, rollback=False): if valor is None: raise Exception(\"Necessário prover um valor de", "raise Exception(\"Necessário prover nome.\") if valor_aluguel is None: raise Exception(\"Necessário prover um valor", "cpf } except sqlite3.Error as e: # e if rollback: self.conn.rollback() return None", "instalacao_eletrica)) if commit: self.conn.commit() return { 'id_casa': cursor.lastrowid, 'nome_casa': nome, 'valor_aluguel': valor_aluguel, 'agua_casa':", "conn.cursor() cursor.executescript(\"\"\" CREATE TABLE IF NOT EXISTS instalacao_eletrica ( num_instalacao VARCHAR(20) NOT NULL", "None def _valida(self, id_inq=None, id_casa=None): c = Contrato_DAO(make_connection()) if id_inq and id_inq in", "raise Exception(\"Necessário prover um ID\") C = self.get_contrato(id) self._valida(C['id_inq'], C['id_casa'] ) query =", "NOT NULL ); CREATE TABLE IF NOT EXISTS contrato( id_contrato INTEGER NOT NULL", "?''' try: cursor = self.conn.cursor() cursor.execute(query, (id, )) if commit: self.conn.commit() except sqlite3.Error", "x[3] } for x in inquilinos] def altera_inquilino(self, id=None, commit=False, rollback=False, **kwargs): if", "None: raise Exception(\"Necessário prover um número de CPF\") try: cursor = self.conn.cursor() cursor.execute(\"\"\"", "valor_aluguel is None: raise Exception(\"Necessário prover um valor para o aluguel.\") try: cursor", "= f'''UPDATE casa SET {', '.join([f\"{key}{'_casa' if key != 'num_instalacao' else '' }", "Exception(\"Necessário prover uma data de vencimento\") if dt_pag is None: raise Exception(\"Necessário prover", "return [{ 'id_pag': x[0] , 'dt_venc': x[1] , 'dt_pag': x[2] , 'deposito': x[3]", "= conn.cursor() cursor.executescript(\"\"\" CREATE TABLE IF NOT EXISTS instalacao_eletrica ( num_instalacao VARCHAR(20) NOT", "INTEGER NOT NULL, id_inq INTEGER NOT NULL ); CREATE TABLE IF NOT EXISTS", "LEFT JOIN instalacao_eletrica i ON c.num_instalacao = i.num_instalacao; \"\"\") casas = cursor.fetchall() return", "EXISTS inquilino( id_inq INTEGER NOT NULL PRIMARY KEY, cpf_inq VARCHAR(11) NOT NULL UNIQUE,", "LEFT JOIN instalacao_eletrica i ON c.num_instalacao = i.num_instalacao WHERE c.id_casa NOT IN (", "not in (select DISTINCT id_inq from contrato where ativo); \"\"\") else: cursor.execute(\"\"\" SELECT", "'dia_venc_aluguel': x[4], 'id_casa': x[5], 'id_inq': x[6] } for x in contratos][0] class PagamentoDAO(DAO):", "rollback=False): if id is 
None: raise Exception(\"Necessário prover um ID\") query = '''UPDATE", "NULL, valor_aluguel_casa INTEGER NOT NULL, agua_casa VARCHAR(10), num_instalacao VARCHAR(11) UNIQUE, FOREIGN KEY (num_instalacao)", "# e if rollback: self.conn.rollback() return None def altera_instalacao(self, num_instalacao, cpf, commit=False, rollback=False):", "Exception(\"Necessário prover novas informações para o Inquilino\") query = f'''UPDATE casa SET {',", "casas] def altera_casa(self, id=None, commit=False, rollback=False, **kwargs): if id is None: raise Exception(\"Necessário", ", 'dt_venc': dt_venc , 'dt_pag': dt_pag , 'deposito': deposito , 'id_contrato': id_contrato }", "'cpf_titular': x[1] } for x in instalacoes] class Inquilino_DAO(DAO): def adiciona_inquilino(self, cpf=None, nome=None,", "informações para o Inquilino\") query = f'''UPDATE inquilino SET {', '.join([f'{key}_inq = ?'", "e: if rollback: self.conn.rollback() return None def _valida(self, id_inq=None, id_casa=None): c = Contrato_DAO(make_connection())", "Exception(\"Necessário prover uma data de pagamento\") try: cursor = self.conn.cursor() cursor.execute(\"\"\" INSERT INTO", "?) \"\"\", (dt_venc, dt_pag, deposito, id_contrato)) if commit: self.conn.commit() return { 'id_pag': cursor.lastrowid", "PagamentoDAO(DAO): def realiza_pagamento(self, id_contrato=None, dt_pag=None, dt_venc=None, deposito=False, commit=False, rollback=False): if id_contrato is None:", "c LEFT JOIN instalacao_eletrica i ON c.num_instalacao = i.num_instalacao WHERE c.id_casa NOT IN", "inquilino\") try: cursor = self.conn.cursor() self._valida(inq, casa) cursor.execute(\"\"\" INSERT INTO contrato(valor, ativo, dt_fim_contrato,", "REAL NOT NULL, ativo INTEGER NOT NULL, dt_fim_contrato DATE NOT NULL, dia_venc_aluguel INTEGER", "None: raise Exception(\"Necessário prover um contrato\") if dt_venc is None: raise Exception(\"Necessário prover", "commit=False, rollback=False): if id_contrato is None: raise Exception(\"Necessário prover um contrato\") if dt_venc", "return None try: cursor = self.conn.cursor() cursor.execute(query, tuple((kwargs[k] for k in kwargs.keys())) +", "NULL, dt_pag VARCHAR(23), deposito INTEGER NOT NULL, id_contrato INTEGER , FOREIGN KEY (id_contrato)", "INTEGER NOT NULL PRIMARY KEY, cpf_inq VARCHAR(11) NOT NULL UNIQUE, nome_inq VARCHAR(40) NOT", "ativo); \"\"\") else: cursor.execute(\"\"\" SELECT * from inquilino; \"\"\") inquilinos = cursor.fetchall() return", "pagamentos] def todos_pagamentos_contrato(self, id_contrato): cursor = self.conn.cursor() cursor.execute(\"\"\" SELECT * FROM pagamento WHERE", "for x in casas] def altera_casa(self, id=None, commit=False, rollback=False, **kwargs): if id is", "e: if rollback: self.conn.rollback() def ativa_contrato(self, id=None, commit=False, rollback=False): if id is None:", "def todos_contratos(self): cursor = self.conn.cursor() cursor.execute(\"\"\" SELECT * FROM contrato; \"\"\") contratos =", "cursor.executescript(\"\"\" CREATE TABLE IF NOT EXISTS instalacao_eletrica ( num_instalacao VARCHAR(20) NOT NULL PRIMARY", "'id_inq': x[6] } for x in contratos][0] class PagamentoDAO(DAO): def realiza_pagamento(self, id_contrato=None, dt_pag=None,", "nome=None, valor_aluguel=None, agua=None, instalacao_eletrica=None, commit=False, rollback=False): if nome is None: raise Exception(\"Necessário prover", "{ 'id_pag': cursor.lastrowid , 'dt_venc': dt_venc , 'dt_pag': dt_pag , 'deposito': deposito ,", "(valor, ativo,fim_contrato, dia_vencimento, casa, inq)) if commit: self.conn.commit() return { 'id_contrato': 
cursor.lastrowid, 'valor':", "def altera_inquilino(self, id=None, commit=False, rollback=False, **kwargs): if id is None: raise Exception(\"Necessário prover", "Exception(\"Necessário escolher uma casa\") if inq is None: raise Exception(\"Necessário escolher um inquilino\")", "o aluguel.\") try: cursor = self.conn.cursor() cursor.execute(\"\"\" INSERT INTO casa(nome_casa, valor_aluguel_casa, agua_casa, num_instalacao)", "Exception(\"Necessário prover um RG\") try: cursor = self.conn.cursor() cursor.execute(\"\"\" INSERT INTO inquilino(cpf_inq, nome_inq,", "FROM contrato; \"\"\") contratos = cursor.fetchall() return [{ 'id_contrato': x[0], 'valor': x[1], 'ativo':", "VALUES (?, ?, ?) \"\"\", (cpf, nome, rg)) if commit: self.conn.commit() return {", "CasaException(Exception): ... class DAO(): def __init__(self, conn): self.conn = conn class Casa_DAO(DAO): def", "casa(nome_casa, valor_aluguel_casa, agua_casa, num_instalacao) VALUES (?,?,?,?) \"\"\", (nome, valor_aluguel, agua, instalacao_eletrica)) if commit:", "return { 'id_inq': cursor.lastrowid, 'cpf_inq': cpf, 'nome_inq': nome, 'rg_inq': rg } except sqlite3.Error", "NULL UNIQUE ); CREATE TABLE IF NOT EXISTS casa( id_casa INTEGER NOT NULL", "nome, 'valor_aluguel': valor_aluguel, 'agua_casa': agua, 'num_instalacao_eletrica': instalacao_eletrica } except sqlite3.Error as e: if", "return None def todos_inquilinos(self, ativos=False, inativos=False): cursor = self.conn.cursor() if ativos and inativos:", "um ID\") query = '''UPDATE contrato SET ativo = 0 WHERE id_contrato =", "NULL PRIMARY KEY, valor REAL NOT NULL, ativo INTEGER NOT NULL, dt_fim_contrato DATE", "DISTINCT id_inq from contrato where ativo); \"\"\") else: cursor.execute(\"\"\" SELECT * from inquilino;", "id_casa in [x['id_casa'] for x in c.todos_contratos() if x['ativo']]: raise CasaException() def todos_contratos(self):", "if id is None: raise Exception(\"Necessário prover um ID\") if valor is None:", "prover um Nome\") if rg is None: raise Exception(\"Necessário prover um RG\") try:", "cursor.fetchall() return [{ 'id_contrato': x[0], 'valor': x[1], 'ativo': x[2], 'dt_fim_contrato': x[3], 'dia_venc_aluguel': x[4],", "None: raise Exception(\"Necessário prover um ID\") if not len(kwargs): raise Exception(\"Necessário prover novas", "SET {', '.join([f'{key}_inq = ?' for key in kwargs.keys()])} WHERE id_inq = ?'''", "IN ( SELECT casa.id_casa from casa JOIN contrato ON contrato.id_casa= casa.id_casa WHERE ativo", "if commit: self.conn.commit() return { 'num_instalacao': num_instalacao, 'cpf_titular': cpf } except sqlite3.Error as", "prover um valor para o aluguel.\") try: cursor = self.conn.cursor() cursor.execute(\"\"\" INSERT INTO", "valor de aluguel para o contrato\") if dia_vencimento is None: raise Exception(\"Necessário prover", "in c.todos_contratos() if x['ativo']]: raise InquilinoException() if id_casa and id_casa in [x['id_casa'] for", "self.conn.rollback() return None def altera_instalacao(self, num_instalacao, cpf, commit=False, rollback=False): query = f'''UPDATE instalacao_eletrica", "not len(kwargs): raise Exception(\"Necessário prover novas informações para o Inquilino\") query = f'''UPDATE", "commit=False, rollback=False): if id is None: raise Exception(\"Necessário prover um ID\") if valor", "nome_casa INTEGER NOT NULL, valor_aluguel_casa INTEGER NOT NULL, agua_casa VARCHAR(10), num_instalacao VARCHAR(11) UNIQUE,", "INSERT INTO inquilino(cpf_inq, nome_inq, rg_inq) VALUES (?, ?, ?) 
\"\"\", (cpf, nome, rg))", "CPF\") try: cursor = self.conn.cursor() cursor.execute(\"\"\" INSERT INTO instalacao_eletrica VALUES (?, ?) \"\"\",", "NOT NULL, id_casa INTEGER NOT NULL, id_inq INTEGER NOT NULL ); CREATE TABLE", "commit: self.conn.commit() return { 'num_instalacao': num_instalacao, 'cpf_titular': cpf } except sqlite3.Error as e:", "(?, ?, ?) \"\"\", (cpf, nome, rg)) if commit: self.conn.commit() return { 'id_inq':", "prover novas informações para o Inquilino\") query = f'''UPDATE casa SET {', '.join([f\"{key}{'_casa'", "def realiza_pagamento(self, id_contrato=None, dt_pag=None, dt_venc=None, deposito=False, commit=False, rollback=False): if id_contrato is None: raise", "valor REAL NOT NULL, ativo INTEGER NOT NULL, dt_fim_contrato DATE NOT NULL, dia_venc_aluguel", "(id,)) if commit: self.conn.commit() except sqlite3.Error as e: if rollback: self.conn.rollback() def get_contrato(self,", "in kwargs.keys())) + tuple([id])) if commit: self.conn.commit() except sqlite3.Error as e: if rollback:", "cursor = self.conn.cursor() cursor.execute(query, (valor, id)) if commit: self.conn.commit() except sqlite3.Error as e:", "= ?; \"\"\", tuple([id])) contratos = cursor.fetchall() return [{ 'id_contrato': x[0], 'valor': x[1],", "\"\"\", (dt_venc, dt_pag, deposito, id_contrato)) if commit: self.conn.commit() return { 'id_pag': cursor.lastrowid ,", "ativo, 'dt_fim_contrato': fim_contrato, 'dia_venc_aluguel': dia_vencimento, 'id_casa': casa, 'id_inq': inq } except sqlite3.Error as", "id_inq in (select DISTINCT id_inq from contrato where ativo); \"\"\") elif inativos: cursor.execute(\"\"\"", "pagamento(dt_venc, dt_pag, deposito, id_contrato) VALUES (?, ?, ?, ?) \"\"\", (dt_venc, dt_pag, deposito,", "self.conn.commit() return { 'id_contrato': cursor.lastrowid, 'valor': valor, 'ativo': ativo, 'dt_fim_contrato': fim_contrato, 'dia_venc_aluguel': dia_vencimento,", "nome.\") if valor_aluguel is None: raise Exception(\"Necessário prover um valor para o aluguel.\")", "e: # e if rollback: self.conn.rollback() return None def altera_instalacao(self, num_instalacao, cpf, commit=False,", "x in contratos] def altera_valor_contrato(self, id=None, valor=None, commit=False, rollback=False): if id is None:", "rollback: self.conn.rollback() return None def todas_casas(self, vazias=False): cursor = self.conn.cursor() if vazias: cursor.execute(\"\"\"", "= self.conn.cursor() cursor.execute(\"\"\" INSERT INTO pagamento(dt_venc, dt_pag, deposito, id_contrato) VALUES (?, ?, ?,", "num_instalacao VARCHAR(20) NOT NULL PRIMARY KEY, cpf_titular VARCHAR(11) NOT NULL UNIQUE ); CREATE", "return { 'num_instalacao': num_instalacao, 'cpf_titular': cpf } except sqlite3.Error as e: # e", "x[1] } for x in instalacoes] class Inquilino_DAO(DAO): def adiciona_inquilino(self, cpf=None, nome=None, rg=None,", "[{ 'id_pag': x[0] , 'dt_venc': x[1] , 'dt_pag': x[2] , 'deposito': x[3] ,", "= ?; \"\"\", (id_contrato)) pagamentos = cursor.fetchall() return [{ 'id_pag': x[0] , 'dt_venc':", "NOT EXISTS inquilino( id_inq INTEGER NOT NULL PRIMARY KEY, cpf_inq VARCHAR(11) NOT NULL", "0 WHERE id_contrato = ?''' try: cursor = self.conn.cursor() cursor.execute(query, (id, )) if", "id is None: raise Exception(\"Necessário prover um ID\") if not len(kwargs): raise Exception(\"Necessário", "PRIMARY KEY, nome_casa INTEGER NOT NULL, valor_aluguel_casa INTEGER NOT NULL, agua_casa VARCHAR(10), num_instalacao", "FROM contrato WHERE id_contrato = ?; \"\"\", tuple([id])) contratos = cursor.fetchall() return [{", "dt_fim_contrato DATE NOT NULL, dia_venc_aluguel 

class PagamentoDAO(DAO):
    # Data-access object for the `pagamento` (payment) table.

    def realiza_pagamento(self, id_contrato=None, dt_pag=None, dt_venc=None, deposito=False,
                          commit=False, rollback=False):
        if id_contrato is None:
            raise Exception("Necessário prover um contrato")
        if dt_venc is None:
            raise Exception("Necessário prover uma data de vencimento")
        if dt_pag is None:
            raise Exception("Necessário prover uma data de pagamento")
        try:
            cursor = self.conn.cursor()
            cursor.execute("""
                INSERT INTO pagamento(dt_venc, dt_pag, deposito, id_contrato) VALUES (?, ?, ?, ?)
            """, (dt_venc, dt_pag, deposito, id_contrato))
            if commit:
                self.conn.commit()
            return {
                'id_pag': cursor.lastrowid,
                'dt_venc': dt_venc,
                'dt_pag': dt_pag,
                'deposito': deposito,
                'id_contrato': id_contrato
            }
        except sqlite3.Error as e:
            if rollback:
                self.conn.rollback()
            return None

    def todos_pagamentos(self):
        cursor = self.conn.cursor()
        cursor.execute("""
            SELECT * FROM pagamento;
        """)
        pagamentos = cursor.fetchall()
        return [{
            'id_pag': x[0],
            'dt_venc': x[1],
            'dt_pag': x[2],
            'deposito': x[3],
            'id_contrato': x[4]
        } for x in pagamentos]

    def todos_pagamentos_contrato(self, id_contrato):
        cursor = self.conn.cursor()
        # The parameter must be passed as a 1-tuple; a bare `(id_contrato)` is not a tuple.
        cursor.execute("""
            SELECT * FROM pagamento WHERE pagamento.id_contrato = ?;
        """, (id_contrato,))
        pagamentos = cursor.fetchall()
        return [{
            'id_pag': x[0],
            'dt_venc': x[1],
            'dt_pag': x[2],
            'deposito': x[3],
            'id_contrato': x[4]
        } for x in pagamentos]
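
# --- Usage sketch (editor's addition) -----------------------------------------
# Hedged example of recording and listing payments for a contract; assumes a
# contract with id_contrato=1 already exists in the connected database and
# uses placeholder dates.
def _exemplo_pagamento_dao(conn):
    pagamentos = PagamentoDAO(conn)
    pagamentos.realiza_pagamento(id_contrato=1, dt_venc='2024-05-05',
                                 dt_pag='2024-05-03', deposito=True, commit=True)
    for pag in pagamentos.todos_pagamentos_contrato(1):
        print(pag['dt_venc'], pag['dt_pag'], pag['deposito'])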
\"\"\", (nome, valor_aluguel, agua, instalacao_eletrica)) if", "Contrato_DAO(make_connection()) if id_inq and id_inq in [x['id_inq'] for x in c.todos_contratos() if x['ativo']]:", "valor is None: raise Exception(\"Necessário prover um valor\") query = f'''UPDATE contrato SET", "self.conn.rollback() def inativa_contrato(self, id=None, commit=False, rollback=False): if id is None: raise Exception(\"Necessário prover", "nome_inq VARCHAR(40) NOT NULL, rg_inq VARCHAR(10) NOT NULL ); CREATE TABLE IF NOT", "commit: self.conn.commit() return { 'id_pag': cursor.lastrowid , 'dt_venc': dt_venc , 'dt_pag': dt_pag ,", "import config def make_connection(): return sqlite3.connect(config.DATABASE_URL) class InquilinoException(Exception): ... class CasaException(Exception): ... class", "'.join([f\"{key}{'_casa' if key != 'num_instalacao' else '' } = ?\" for key in", "deposito=False, commit=False, rollback=False): if id_contrato is None: raise Exception(\"Necessário prover um contrato\") if", "from inquilino where id_inq in (select DISTINCT id_inq from contrato where ativo); \"\"\")", "return { 'id_pag': cursor.lastrowid , 'dt_venc': dt_venc , 'dt_pag': dt_pag , 'deposito': deposito", "conn): self.conn = conn class Casa_DAO(DAO): def adiciona_casa(self, nome=None, valor_aluguel=None, agua=None, instalacao_eletrica=None, commit=False,", "self.conn = conn class Casa_DAO(DAO): def adiciona_casa(self, nome=None, valor_aluguel=None, agua=None, instalacao_eletrica=None, commit=False, rollback=False):", "sqlite3.Error as e: if rollback: self.conn.rollback() return None def _valida(self, id_inq=None, id_casa=None): c", "if vazias: cursor.execute(\"\"\" SELECT c.id_casa, nome_casa, valor_aluguel_casa, agua_casa, i.num_instalacao, cpf_titular FROM casa c", "None: raise Exception(\"Necessário prover nome.\") if valor_aluguel is None: raise Exception(\"Necessário prover um", "num_instalacao=None, cpf=None, commit=False, rollback=False): if num_instalacao is None: raise Exception(\"Necessário prover um número", "vazias: cursor.execute(\"\"\" SELECT c.id_casa, nome_casa, valor_aluguel_casa, agua_casa, i.num_instalacao, cpf_titular FROM casa c LEFT", "um ID\") if valor is None: raise Exception(\"Necessário prover um valor\") query =", "if casa is None: raise Exception(\"Necessário escolher uma casa\") if inq is None:", "DATE NOT NULL, dia_venc_aluguel INTEGER NOT NULL, id_casa INTEGER NOT NULL, id_inq INTEGER", "o Inquilino\") query = f'''UPDATE inquilino SET {', '.join([f'{key}_inq = ?' 
for key", "NOT NULL UNIQUE, nome_inq VARCHAR(40) NOT NULL, rg_inq VARCHAR(10) NOT NULL ); CREATE", "cpf, 'nome_inq': nome, 'rg_inq': rg } except sqlite3.Error as e: if rollback: self.conn.rollback()", "prover um número de instalação\") if cpf is None: raise Exception(\"Necessário prover um", "ON c.num_instalacao = i.num_instalacao; \"\"\") casas = cursor.fetchall() return [{ 'id_casa': x[0], 'nome_casa':", "cpf is None: raise Exception(\"Necessário prover um número de CPF\") try: cursor =", "'dia_venc_aluguel': dia_vencimento, 'id_casa': casa, 'id_inq': inq } except sqlite3.Error as e: if rollback:", "do Gerenciador de Casas de Aluguel ====================================== \"\"\" # https://www.pythoncentral.io/introduction-to-sqlite-in-python/ import sqlite3 import", "'deposito': x[3] , 'id_contrato': x[4] } for x in pagamentos] def todos_pagamentos_contrato(self, id_contrato):", "); CREATE TABLE IF NOT EXISTS contrato( id_contrato INTEGER NOT NULL PRIMARY KEY,", "sqlite3.Error as e: if rollback: self.conn.rollback() class Contrato_DAO(DAO): def adiciona_contrato(self, valor=None, ativo=True, dia_vencimento=None,", "cursor.execute(\"\"\" SELECT * FROM pagamento; \"\"\") pagamentos = cursor.fetchall() return [{ 'id_pag': x[0]", "todas_casas(self, vazias=False): cursor = self.conn.cursor() if vazias: cursor.execute(\"\"\" SELECT c.id_casa, nome_casa, valor_aluguel_casa, agua_casa,", "self.conn.commit() except sqlite3.Error as e: if rollback: self.conn.rollback() def get_contrato(self, id): cursor =", "escolher uma casa\") if inq is None: raise Exception(\"Necessário escolher um inquilino\") try:", "'id_inq': inq } except sqlite3.Error as e: if rollback: self.conn.rollback() return None def", "instalacao_eletrica } except sqlite3.Error as e: if rollback: self.conn.rollback() return None def todas_casas(self,", "= self.conn.cursor() cursor.execute(query, (cpf, num_instalacao)) if commit: self.conn.commit() except sqlite3.Error as e: if", "None def altera_instalacao(self, num_instalacao, cpf, commit=False, rollback=False): query = f'''UPDATE instalacao_eletrica SET cpf_titular", "(num_instalacao) REFERENCES instalacao_eletrica(num_instalacao) ); CREATE TABLE IF NOT EXISTS inquilino( id_inq INTEGER NOT", "raise Exception(\"Necessário prover um número de CPF\") try: cursor = self.conn.cursor() cursor.execute(\"\"\" INSERT", "https://www.pythoncentral.io/introduction-to-sqlite-in-python/ import sqlite3 import config def make_connection(): return sqlite3.connect(config.DATABASE_URL) class InquilinoException(Exception): ... 
class", "cursor = self.conn.cursor() self._valida(inq, casa) cursor.execute(\"\"\" INSERT INTO contrato(valor, ativo, dt_fim_contrato, dia_venc_aluguel, id_casa,", "= self.conn.cursor() cursor.execute(\"\"\" SELECT * FROM pagamento WHERE pagamento.id_contrato = ?; \"\"\", (id_contrato))", "num_instalacao is None: raise Exception(\"Necessário prover um número de instalação\") if cpf is", "if id_contrato is None: raise Exception(\"Necessário prover um contrato\") if dt_venc is None:", "SELECT * FROM instalacao_eletrica; \"\"\") instalcoes = cursor.fetchall() return [{ 'num_instalacao': x[0], 'cpf_titular':", "NULL, agua_casa VARCHAR(10), num_instalacao VARCHAR(11) UNIQUE, FOREIGN KEY (num_instalacao) REFERENCES instalacao_eletrica(num_instalacao) ); CREATE", "rollback: self.conn.rollback() def get_contrato(self, id): cursor = self.conn.cursor() cursor.execute(\"\"\" SELECT * FROM contrato", ", 'dt_pag': x[2] , 'deposito': x[3] , 'id_contrato': x[4] } for x in", "de vencimento\") if dt_pag is None: raise Exception(\"Necessário prover uma data de pagamento\")", "if rollback: self.conn.rollback() def inativa_contrato(self, id=None, commit=False, rollback=False): if id is None: raise", "tuple((kwargs[k] for k in kwargs.keys())) + tuple([id])) if commit: self.conn.commit() except sqlite3.Error as", "cursor = self.conn.cursor() cursor.execute(\"\"\" INSERT INTO inquilino(cpf_inq, nome_inq, rg_inq) VALUES (?, ?, ?)", "um valor\") query = f'''UPDATE contrato SET valor = ? WHERE id_contrato =", "ativo ) GROUP BY c.id_casa; \"\"\") else: cursor.execute(\"\"\" SELECT c.id_casa, nome_casa, valor_aluguel_casa, agua_casa,", "'rg_inq': x[3] } for x in inquilinos] def altera_inquilino(self, id=None, commit=False, rollback=False, **kwargs):", "casa.id_casa from casa JOIN contrato ON contrato.id_casa= casa.id_casa WHERE ativo ) GROUP BY", "x[1], 'nome_inq': x[2], 'rg_inq': x[3] } for x in inquilinos] def altera_inquilino(self, id=None,", "raise Exception(\"Necessário prover um contrato\") if dt_venc is None: raise Exception(\"Necessário prover uma", "id_contrato = ?''' try: cursor = self.conn.cursor() cursor.execute(query, (id,)) if commit: self.conn.commit() except", "\"\"\") instalcoes = cursor.fetchall() return [{ 'num_instalacao': x[0], 'cpf_titular': x[1] } for x", "} = ?\" for key in kwargs.keys()])} WHERE id_casa = ?''' # return", "except sqlite3.Error as e: if rollback: self.conn.rollback() return None def _valida(self, id_inq=None, id_casa=None):", "um ID\") C = self.get_contrato(id) self._valida(C['id_inq'], C['id_casa'] ) query = '''UPDATE contrato SET", "para o aluguel.\") try: cursor = self.conn.cursor() cursor.execute(\"\"\" INSERT INTO casa(nome_casa, valor_aluguel_casa, agua_casa,", "len(kwargs): raise Exception(\"Necessário prover novas informações para o Inquilino\") query = f'''UPDATE casa", "1 WHERE id_contrato = ?''' try: cursor = self.conn.cursor() cursor.execute(query, (id,)) if commit:", "?) \"\"\", (cpf, nome, rg)) if commit: self.conn.commit() return { 'id_inq': cursor.lastrowid, 'cpf_inq':", "instalacoes] class Inquilino_DAO(DAO): def adiciona_inquilino(self, cpf=None, nome=None, rg=None, commit=False, rollback=False): if cpf is", "in [x['id_inq'] for x in c.todos_contratos() if x['ativo']]: raise InquilinoException() if id_casa and", "f'''UPDATE casa SET {', '.join([f\"{key}{'_casa' if key != 'num_instalacao' else '' } =", "contrato SET valor = ? 
WHERE id_contrato = ?''' print(query) try: cursor =", "um contrato\") if dt_venc is None: raise Exception(\"Necessário prover uma data de vencimento\")", "contrato SET ativo = 1 WHERE id_contrato = ?''' try: cursor = self.conn.cursor()", "casa, inq)) if commit: self.conn.commit() return { 'id_contrato': cursor.lastrowid, 'valor': valor, 'ativo': ativo,", "if rollback: self.conn.rollback() return None def altera_instalacao(self, num_instalacao, cpf, commit=False, rollback=False): query =", "'dt_venc': x[1] , 'dt_pag': x[2] , 'deposito': x[3] , 'id_contrato': x[4] } for", "pagamento WHERE pagamento.id_contrato = ?; \"\"\", (id_contrato)) pagamentos = cursor.fetchall() return [{ 'id_pag':", "self.conn.cursor() cursor.execute(\"\"\" INSERT INTO casa(nome_casa, valor_aluguel_casa, agua_casa, num_instalacao) VALUES (?,?,?,?) \"\"\", (nome, valor_aluguel,", "data de vencimento\") if dt_pag is None: raise Exception(\"Necessário prover uma data de", "{ 'id_inq': cursor.lastrowid, 'cpf_inq': cpf, 'nome_inq': nome, 'rg_inq': rg } except sqlite3.Error as", "x[2], 'rg_inq': x[3] } for x in inquilinos] def altera_inquilino(self, id=None, commit=False, rollback=False,", "# return None try: cursor = self.conn.cursor() cursor.execute(query, (cpf, num_instalacao)) if commit: self.conn.commit()", "cursor.execute(\"\"\" SELECT * FROM contrato; \"\"\") contratos = cursor.fetchall() return [{ 'id_contrato': x[0],", "id_contrato) VALUES (?, ?, ?, ?) \"\"\", (dt_venc, dt_pag, deposito, id_contrato)) if commit:", "Casas de Aluguel ====================================== \"\"\" # https://www.pythoncentral.io/introduction-to-sqlite-in-python/ import sqlite3 import config def make_connection():", "else '' } = ?\" for key in kwargs.keys()])} WHERE id_casa = ?'''", "in contratos][0] class PagamentoDAO(DAO): def realiza_pagamento(self, id_contrato=None, dt_pag=None, dt_venc=None, deposito=False, commit=False, rollback=False): if", "where ativo); \"\"\") else: cursor.execute(\"\"\" SELECT * from inquilino; \"\"\") inquilinos = cursor.fetchall()", "x[4] } for x in pagamentos] def start_db(conn): cursor = conn.cursor() cursor.executescript(\"\"\" CREATE", "id=None, commit=False, rollback=False, **kwargs): if id is None: raise Exception(\"Necessário prover um ID\")", "'''UPDATE contrato SET ativo = 1 WHERE id_contrato = ?''' try: cursor =", "Exception(\"Necessário prover um número de CPF\") if nome is None: raise Exception(\"Necessário prover", "'id_contrato': x[4] } for x in pagamentos] def start_db(conn): cursor = conn.cursor() cursor.executescript(\"\"\"", "x['ativo']]: raise CasaException() def todos_contratos(self): cursor = self.conn.cursor() cursor.execute(\"\"\" SELECT * FROM contrato;", "cursor.execute(\"\"\" SELECT * FROM pagamento WHERE pagamento.id_contrato = ?; \"\"\", (id_contrato)) pagamentos =", "SET {', '.join([f\"{key}{'_casa' if key != 'num_instalacao' else '' } = ?\" for", "sqlite3 import config def make_connection(): return sqlite3.connect(config.DATABASE_URL) class InquilinoException(Exception): ... 
class CasaException(Exception): ...", "self.conn.commit() return { 'id_inq': cursor.lastrowid, 'cpf_inq': cpf, 'nome_inq': nome, 'rg_inq': rg } except", "return [{ 'id_inq': x[0], 'cpf_inq': x[1], 'nome_inq': x[2], 'rg_inq': x[3] } for x", "tuple([id])) contratos = cursor.fetchall() return [{ 'id_contrato': x[0], 'valor': x[1], 'ativo': x[2], 'dt_fim_contrato':", "in [x['id_casa'] for x in c.todos_contratos() if x['ativo']]: raise CasaException() def todos_contratos(self): cursor", "REFERENCES instalacao_eletrica(num_instalacao) ); CREATE TABLE IF NOT EXISTS inquilino( id_inq INTEGER NOT NULL", "nome, 'rg_inq': rg } except sqlite3.Error as e: if rollback: self.conn.rollback() return None", ", 'id_contrato': x[4] } for x in pagamentos] def todos_pagamentos_contrato(self, id_contrato): cursor =", "raise Exception(\"Necessário prover novas informações para o Inquilino\") query = f'''UPDATE inquilino SET", "commit: self.conn.commit() except sqlite3.Error as e: if rollback: self.conn.rollback() class Instalacao_Eletrica_DAO(DAO): def adiciona_instalacao_eletrica(self,", "e: if rollback: self.conn.rollback() return None def todos_inquilinos(self, ativos=False, inativos=False): cursor = self.conn.cursor()", "= cursor.fetchall() return [{ 'id_casa': x[0], 'nome_casa': x[1], 'valor_aluguel': x[2], 'agua_casa': x[3], 'num_instalacao_eletrica':", "def todos_inquilinos(self, ativos=False, inativos=False): cursor = self.conn.cursor() if ativos and inativos: raise Exception(\"Conflito\")", "( num_instalacao VARCHAR(20) NOT NULL PRIMARY KEY, cpf_titular VARCHAR(11) NOT NULL UNIQUE );", "TABLE IF NOT EXISTS casa( id_casa INTEGER NOT NULL PRIMARY KEY, nome_casa INTEGER", "CREATE TABLE IF NOT EXISTS inquilino( id_inq INTEGER NOT NULL PRIMARY KEY, cpf_inq", "if rollback: self.conn.rollback() def todas_instalacoes(self): cursor = self.conn.cursor() cursor.execute(\"\"\" SELECT * FROM instalacao_eletrica;", "* from inquilino; \"\"\") inquilinos = cursor.fetchall() return [{ 'id_inq': x[0], 'cpf_inq': x[1],", "DAO(): def __init__(self, conn): self.conn = conn class Casa_DAO(DAO): def adiciona_casa(self, nome=None, valor_aluguel=None,", "if rollback: self.conn.rollback() return None def todos_inquilinos(self, ativos=False, inativos=False): cursor = self.conn.cursor() if", "self.conn.rollback() def todas_instalacoes(self): cursor = self.conn.cursor() cursor.execute(\"\"\" SELECT * FROM instalacao_eletrica; \"\"\") instalcoes", "select * from inquilino where id_inq not in (select DISTINCT id_inq from contrato", "NULL PRIMARY KEY, cpf_titular VARCHAR(11) NOT NULL UNIQUE ); CREATE TABLE IF NOT", "as e: if rollback: self.conn.rollback() class Instalacao_Eletrica_DAO(DAO): def adiciona_instalacao_eletrica(self, num_instalacao=None, cpf=None, commit=False, rollback=False):", "print(query) try: cursor = self.conn.cursor() cursor.execute(query, (valor, id)) if commit: self.conn.commit() except sqlite3.Error", "SET cpf_titular = ? WHERE num_instalacao = ? 
''' # return None try:", "?''' try: cursor = self.conn.cursor() cursor.execute(query, (id,)) if commit: self.conn.commit() except sqlite3.Error as", "NOT NULL, agua_casa VARCHAR(10), num_instalacao VARCHAR(11) UNIQUE, FOREIGN KEY (num_instalacao) REFERENCES instalacao_eletrica(num_instalacao) );", "def todos_pagamentos(self): cursor = self.conn.cursor() cursor.execute(\"\"\" SELECT * FROM pagamento; \"\"\") pagamentos =", "KEY, valor REAL NOT NULL, ativo INTEGER NOT NULL, dt_fim_contrato DATE NOT NULL,", "'id_contrato': id_contrato } except sqlite3.Error as e: if rollback: self.conn.rollback() return None def", "id=None, commit=False, rollback=False): if id is None: raise Exception(\"Necessário prover um ID\") query", "?''' print(query) try: cursor = self.conn.cursor() cursor.execute(query, (valor, id)) if commit: self.conn.commit() except", "is None: raise Exception(\"Necessário prover um valor de aluguel para o contrato\") if", "RG\") try: cursor = self.conn.cursor() cursor.execute(\"\"\" INSERT INTO inquilino(cpf_inq, nome_inq, rg_inq) VALUES (?,", "class PagamentoDAO(DAO): def realiza_pagamento(self, id_contrato=None, dt_pag=None, dt_venc=None, deposito=False, commit=False, rollback=False): if id_contrato is", "um valor para o aluguel.\") try: cursor = self.conn.cursor() cursor.execute(\"\"\" INSERT INTO casa(nome_casa,", "c.todos_contratos() if x['ativo']]: raise CasaException() def todos_contratos(self): cursor = self.conn.cursor() cursor.execute(\"\"\" SELECT *", "de Casas de Aluguel ====================================== \"\"\" # https://www.pythoncentral.io/introduction-to-sqlite-in-python/ import sqlite3 import config def", "x[2], 'agua_casa': x[3], 'num_instalacao_eletrica': x[4], 'cpf': x[5] } for x in casas] def", "WHERE id_casa = ?''' # return None try: cursor = self.conn.cursor() cursor.execute(query, tuple((kwargs[k]", "except sqlite3.Error as e: if rollback: self.conn.rollback() return None def todos_pagamentos(self): cursor =", "'valor': x[1], 'ativo': x[2], 'dt_fim_contrato': x[3], 'dia_venc_aluguel': x[4], 'id_casa': x[5], 'id_inq': x[6] }", "if valor is None: raise Exception(\"Necessário prover um valor de aluguel para o", "(?,?,?,?) 
\"\"\", (nome, valor_aluguel, agua, instalacao_eletrica)) if commit: self.conn.commit() return { 'id_casa': cursor.lastrowid,", "id_contrato INTEGER NOT NULL PRIMARY KEY, valor REAL NOT NULL, ativo INTEGER NOT", "if dt_pag is None: raise Exception(\"Necessário prover uma data de pagamento\") try: cursor", "x in c.todos_contratos() if x['ativo']]: raise CasaException() def todos_contratos(self): cursor = self.conn.cursor() cursor.execute(\"\"\"", "agua_casa, i.num_instalacao, cpf_titular FROM casa c LEFT JOIN instalacao_eletrica i ON c.num_instalacao =", "para o contrato\") if dia_vencimento is None: raise Exception(\"Necessário prover uma data de", "cursor = self.conn.cursor() cursor.execute(\"\"\" SELECT * FROM contrato WHERE id_contrato = ?; \"\"\",", "if nome is None: raise Exception(\"Necessário prover um Nome\") if rg is None:", "'id_inq': x[6] } for x in contratos] def altera_valor_contrato(self, id=None, valor=None, commit=False, rollback=False):", "as e: if rollback: self.conn.rollback() return None def todos_inquilinos(self, ativos=False, inativos=False): cursor =", "deposito , 'id_contrato': id_contrato } except sqlite3.Error as e: if rollback: self.conn.rollback() return", "x[2] , 'deposito': x[3] , 'id_contrato': x[4] } for x in pagamentos] def", "NULL PRIMARY KEY, nome_casa INTEGER NOT NULL, valor_aluguel_casa INTEGER NOT NULL, agua_casa VARCHAR(10),", "def adiciona_contrato(self, valor=None, ativo=True, dia_vencimento=None, fim_contrato=None, casa=None, inq=None, commit=False, rollback=False): if valor is", "?, ?) \"\"\", (cpf, nome, rg)) if commit: self.conn.commit() return { 'id_inq': cursor.lastrowid,", "dia_venc_aluguel INTEGER NOT NULL, id_casa INTEGER NOT NULL, id_inq INTEGER NOT NULL );", "cursor.execute(query, (id,)) if commit: self.conn.commit() except sqlite3.Error as e: if rollback: self.conn.rollback() def", "agua_casa VARCHAR(10), num_instalacao VARCHAR(11) UNIQUE, FOREIGN KEY (num_instalacao) REFERENCES instalacao_eletrica(num_instalacao) ); CREATE TABLE", "is None: raise Exception(\"Necessário prover um Nome\") if rg is None: raise Exception(\"Necessário", "query = '''UPDATE contrato SET ativo = 0 WHERE id_contrato = ?''' try:", "WHERE pagamento.id_contrato = ?; \"\"\", (id_contrato)) pagamentos = cursor.fetchall() return [{ 'id_pag': x[0]", "casa) cursor.execute(\"\"\" INSERT INTO contrato(valor, ativo, dt_fim_contrato, dia_venc_aluguel, id_casa, id_inq) VALUES (?,?,?,?,?,?) 
\"\"\",", "dt_venc VARCHAR(23) NOT NULL, dt_pag VARCHAR(23), deposito INTEGER NOT NULL, id_contrato INTEGER ,", "ON c.num_instalacao = i.num_instalacao WHERE c.id_casa NOT IN ( SELECT casa.id_casa from casa", "} for x in instalacoes] class Inquilino_DAO(DAO): def adiciona_inquilino(self, cpf=None, nome=None, rg=None, commit=False,", "dia_vencimento, 'id_casa': casa, 'id_inq': inq } except sqlite3.Error as e: if rollback: self.conn.rollback()", "is None: raise Exception(\"Necessário prover uma data de vencimento\") if dt_pag is None:", "NOT EXISTS pagamento( id_pag INTEGER NOT NULL PRIMARY KEY, dt_venc VARCHAR(23) NOT NULL,", "altera_instalacao(self, num_instalacao, cpf, commit=False, rollback=False): query = f'''UPDATE instalacao_eletrica SET cpf_titular = ?", "nome_casa, valor_aluguel_casa, agua_casa, i.num_instalacao, cpf_titular FROM casa c LEFT JOIN instalacao_eletrica i ON", "if rollback: self.conn.rollback() return None def todas_casas(self, vazias=False): cursor = self.conn.cursor() if vazias:", "x[4], 'id_casa': x[5], 'id_inq': x[6] } for x in contratos] def altera_valor_contrato(self, id=None,", "'nome_casa': x[1], 'valor_aluguel': x[2], 'agua_casa': x[3], 'num_instalacao_eletrica': x[4], 'cpf': x[5] } for x", "= Contrato_DAO(make_connection()) if id_inq and id_inq in [x['id_inq'] for x in c.todos_contratos() if", "Aluguel ====================================== \"\"\" # https://www.pythoncentral.io/introduction-to-sqlite-in-python/ import sqlite3 import config def make_connection(): return sqlite3.connect(config.DATABASE_URL)", "in kwargs.keys()])} WHERE id_casa = ?''' # return None try: cursor = self.conn.cursor()", "is None: raise Exception(\"Necessário prover um número de CPF\") try: cursor = self.conn.cursor()", "o Inquilino\") query = f'''UPDATE casa SET {', '.join([f\"{key}{'_casa' if key != 'num_instalacao'", "cursor = self.conn.cursor() cursor.execute(\"\"\" SELECT * FROM pagamento; \"\"\") pagamentos = cursor.fetchall() return", "= ? ''' # return None try: cursor = self.conn.cursor() cursor.execute(query, (cpf, num_instalacao))", "(cpf, num_instalacao)) if commit: self.conn.commit() except sqlite3.Error as e: if rollback: self.conn.rollback() def", "for x in pagamentos] def start_db(conn): cursor = conn.cursor() cursor.executescript(\"\"\" CREATE TABLE IF", "} except sqlite3.Error as e: if rollback: self.conn.rollback() return None def todas_casas(self, vazias=False):", "query = f'''UPDATE instalacao_eletrica SET cpf_titular = ? WHERE num_instalacao = ? 
'''", "* FROM pagamento; \"\"\") pagamentos = cursor.fetchall() return [{ 'id_pag': x[0] , 'dt_venc':", "as e: if rollback: self.conn.rollback() def todas_instalacoes(self): cursor = self.conn.cursor() cursor.execute(\"\"\" SELECT *", "'dt_pag': dt_pag , 'deposito': deposito , 'id_contrato': id_contrato } except sqlite3.Error as e:", "None: raise Exception(\"Necessário prover um ID\") if valor is None: raise Exception(\"Necessário prover", "CREATE TABLE IF NOT EXISTS pagamento( id_pag INTEGER NOT NULL PRIMARY KEY, dt_venc", "( SELECT casa.id_casa from casa JOIN contrato ON contrato.id_casa= casa.id_casa WHERE ativo )", "cursor.execute(query, tuple((kwargs[k] for k in kwargs.keys())) + tuple([id])) if commit: self.conn.commit() except sqlite3.Error", "TABLE IF NOT EXISTS contrato( id_contrato INTEGER NOT NULL PRIMARY KEY, valor REAL", "BY c.id_casa; \"\"\") else: cursor.execute(\"\"\" SELECT c.id_casa, nome_casa, valor_aluguel_casa, agua_casa, i.num_instalacao, cpf_titular FROM", "'id_casa': cursor.lastrowid, 'nome_casa': nome, 'valor_aluguel': valor_aluguel, 'agua_casa': agua, 'num_instalacao_eletrica': instalacao_eletrica } except sqlite3.Error", "'ativo': ativo, 'dt_fim_contrato': fim_contrato, 'dia_venc_aluguel': dia_vencimento, 'id_casa': casa, 'id_inq': inq } except sqlite3.Error", "prover uma data de vencimento\") if casa is None: raise Exception(\"Necessário escolher uma", "INSERT INTO instalacao_eletrica VALUES (?, ?) \"\"\", (num_instalacao, cpf)) if commit: self.conn.commit() return", "VARCHAR(23), deposito INTEGER NOT NULL, id_contrato INTEGER , FOREIGN KEY (id_contrato) REFERENCES contrato(id_contrato)", "x in instalacoes] class Inquilino_DAO(DAO): def adiciona_inquilino(self, cpf=None, nome=None, rg=None, commit=False, rollback=False): if", "id_casa = ?''' # return None try: cursor = self.conn.cursor() cursor.execute(query, tuple((kwargs[k] for", "C = self.get_contrato(id) self._valida(C['id_inq'], C['id_casa'] ) query = '''UPDATE contrato SET ativo =", "prover um contrato\") if dt_venc is None: raise Exception(\"Necessário prover uma data de", "try: cursor = self.conn.cursor() cursor.execute(query, (cpf, num_instalacao)) if commit: self.conn.commit() except sqlite3.Error as", "where id_inq not in (select DISTINCT id_inq from contrato where ativo); \"\"\") else:", "cursor.execute(\"\"\" select * from inquilino where id_inq not in (select DISTINCT id_inq from", "is None: raise Exception(\"Necessário prover uma data de pagamento\") try: cursor = self.conn.cursor()", "commit: self.conn.commit() return { 'id_casa': cursor.lastrowid, 'nome_casa': nome, 'valor_aluguel': valor_aluguel, 'agua_casa': agua, 'num_instalacao_eletrica':", "return [{ 'id_contrato': x[0], 'valor': x[1], 'ativo': x[2], 'dt_fim_contrato': x[3], 'dia_venc_aluguel': x[4], 'id_casa':", "rg)) if commit: self.conn.commit() return { 'id_inq': cursor.lastrowid, 'cpf_inq': cpf, 'nome_inq': nome, 'rg_inq':", "KEY, nome_casa INTEGER NOT NULL, valor_aluguel_casa INTEGER NOT NULL, agua_casa VARCHAR(10), num_instalacao VARCHAR(11)", "VARCHAR(40) NOT NULL, rg_inq VARCHAR(10) NOT NULL ); CREATE TABLE IF NOT EXISTS", "inq)) if commit: self.conn.commit() return { 'id_contrato': cursor.lastrowid, 'valor': valor, 'ativo': ativo, 'dt_fim_contrato':", "x[6] } for x in contratos] def altera_valor_contrato(self, id=None, valor=None, commit=False, rollback=False): if", "if rg is None: raise Exception(\"Necessário prover um RG\") try: cursor = self.conn.cursor()", "if rollback: self.conn.rollback() class Instalacao_Eletrica_DAO(DAO): 
def adiciona_instalacao_eletrica(self, num_instalacao=None, cpf=None, commit=False, rollback=False): if num_instalacao", "= ? WHERE id_contrato = ?''' print(query) try: cursor = self.conn.cursor() cursor.execute(query, (valor,", "rg is None: raise Exception(\"Necessário prover um RG\") try: cursor = self.conn.cursor() cursor.execute(\"\"\"", "if inq is None: raise Exception(\"Necessário escolher um inquilino\") try: cursor = self.conn.cursor()", "return None def todos_pagamentos(self): cursor = self.conn.cursor() cursor.execute(\"\"\" SELECT * FROM pagamento; \"\"\")", "self.conn.rollback() return None def todos_pagamentos(self): cursor = self.conn.cursor() cursor.execute(\"\"\" SELECT * FROM pagamento;", "data de vencimento\") if casa is None: raise Exception(\"Necessário escolher uma casa\") if", "CPF\") if nome is None: raise Exception(\"Necessário prover um Nome\") if rg is", ", 'dt_pag': dt_pag , 'deposito': deposito , 'id_contrato': id_contrato } except sqlite3.Error as", "EXISTS contrato( id_contrato INTEGER NOT NULL PRIMARY KEY, valor REAL NOT NULL, ativo", "self.conn.rollback() return None def _valida(self, id_inq=None, id_casa=None): c = Contrato_DAO(make_connection()) if id_inq and", "JOIN instalacao_eletrica i ON c.num_instalacao = i.num_instalacao WHERE c.id_casa NOT IN ( SELECT", "[{ 'id_casa': x[0], 'nome_casa': x[1], 'valor_aluguel': x[2], 'agua_casa': x[3], 'num_instalacao_eletrica': x[4], 'cpf': x[5]", "realiza_pagamento(self, id_contrato=None, dt_pag=None, dt_venc=None, deposito=False, commit=False, rollback=False): if id_contrato is None: raise Exception(\"Necessário", "VALUES (?,?,?,?) \"\"\", (nome, valor_aluguel, agua, instalacao_eletrica)) if commit: self.conn.commit() return { 'id_casa':", "kwargs.keys()])} WHERE id_casa = ?''' # return None try: cursor = self.conn.cursor() cursor.execute(query,", "} except sqlite3.Error as e: if rollback: self.conn.rollback() return None def todos_inquilinos(self, ativos=False,", "def get_contrato(self, id): cursor = self.conn.cursor() cursor.execute(\"\"\" SELECT * FROM contrato WHERE id_contrato", "c.todos_contratos() if x['ativo']]: raise InquilinoException() if id_casa and id_casa in [x['id_casa'] for x", "as e: if rollback: self.conn.rollback() def inativa_contrato(self, id=None, commit=False, rollback=False): if id is", "Inquilino_DAO(DAO): def adiciona_inquilino(self, cpf=None, nome=None, rg=None, commit=False, rollback=False): if cpf is None: raise", "casa=None, inq=None, commit=False, rollback=False): if valor is None: raise Exception(\"Necessário prover um valor", "NOT NULL PRIMARY KEY, cpf_inq VARCHAR(11) NOT NULL UNIQUE, nome_inq VARCHAR(40) NOT NULL,", "JOIN contrato ON contrato.id_casa= casa.id_casa WHERE ativo ) GROUP BY c.id_casa; \"\"\") else:", "contrato\") if dt_venc is None: raise Exception(\"Necessário prover uma data de vencimento\") if", "None: raise Exception(\"Necessário prover um Nome\") if rg is None: raise Exception(\"Necessário prover", "cursor.execute(\"\"\" INSERT INTO contrato(valor, ativo, dt_fim_contrato, dia_venc_aluguel, id_casa, id_inq) VALUES (?,?,?,?,?,?) 
\"\"\", (valor,", "ativo); \"\"\") elif inativos: cursor.execute(\"\"\" select * from inquilino where id_inq not in", "'''UPDATE contrato SET ativo = 0 WHERE id_contrato = ?''' try: cursor =", "deposito INTEGER NOT NULL, id_contrato INTEGER , FOREIGN KEY (id_contrato) REFERENCES contrato(id_contrato) );", "vencimento\") if dt_pag is None: raise Exception(\"Necessário prover uma data de pagamento\") try:", "rollback=False, **kwargs): if id is None: raise Exception(\"Necessário prover um ID\") if not", "self.conn.commit() except sqlite3.Error as e: if rollback: self.conn.rollback() def ativa_contrato(self, id=None, commit=False, rollback=False):", "self.conn.rollback() def ativa_contrato(self, id=None, commit=False, rollback=False): if id is None: raise Exception(\"Necessário prover", "ativo INTEGER NOT NULL, dt_fim_contrato DATE NOT NULL, dia_venc_aluguel INTEGER NOT NULL, id_casa", "prover um RG\") try: cursor = self.conn.cursor() cursor.execute(\"\"\" INSERT INTO inquilino(cpf_inq, nome_inq, rg_inq)", "from contrato where ativo); \"\"\") else: cursor.execute(\"\"\" SELECT * from inquilino; \"\"\") inquilinos", "raise Exception(\"Necessário prover um número de CPF\") if nome is None: raise Exception(\"Necessário", "self.conn.rollback() return None def todos_inquilinos(self, ativos=False, inativos=False): cursor = self.conn.cursor() if ativos and", "prover um número de CPF\") if nome is None: raise Exception(\"Necessário prover um", "return None def todas_casas(self, vazias=False): cursor = self.conn.cursor() if vazias: cursor.execute(\"\"\" SELECT c.id_casa,", "as e: if rollback: self.conn.rollback() def ativa_contrato(self, id=None, commit=False, rollback=False): if id is", "de CPF\") try: cursor = self.conn.cursor() cursor.execute(\"\"\" INSERT INTO instalacao_eletrica VALUES (?, ?)", "um valor de aluguel para o contrato\") if dia_vencimento is None: raise Exception(\"Necessário", "\"\"\", (cpf, nome, rg)) if commit: self.conn.commit() return { 'id_inq': cursor.lastrowid, 'cpf_inq': cpf,", "= '''UPDATE contrato SET ativo = 0 WHERE id_contrato = ?''' try: cursor", "None def todos_pagamentos(self): cursor = self.conn.cursor() cursor.execute(\"\"\" SELECT * FROM pagamento; \"\"\") pagamentos", "VARCHAR(11) NOT NULL UNIQUE, nome_inq VARCHAR(40) NOT NULL, rg_inq VARCHAR(10) NOT NULL );", "pagamento\") try: cursor = self.conn.cursor() cursor.execute(\"\"\" INSERT INTO pagamento(dt_venc, dt_pag, deposito, id_contrato) VALUES", "= self.get_contrato(id) self._valida(C['id_inq'], C['id_casa'] ) query = '''UPDATE contrato SET ativo = 1", "INTO contrato(valor, ativo, dt_fim_contrato, dia_venc_aluguel, id_casa, id_inq) VALUES (?,?,?,?,?,?) 
\"\"\", (valor, ativo,fim_contrato, dia_vencimento,", "commit: self.conn.commit() except sqlite3.Error as e: if rollback: self.conn.rollback() def todas_instalacoes(self): cursor =", "if commit: self.conn.commit() return { 'id_contrato': cursor.lastrowid, 'valor': valor, 'ativo': ativo, 'dt_fim_contrato': fim_contrato,", "None: raise Exception(\"Necessário prover um ID\") C = self.get_contrato(id) self._valida(C['id_inq'], C['id_casa'] ) query", "'valor_aluguel': x[2], 'agua_casa': x[3], 'num_instalacao_eletrica': x[4], 'cpf': x[5] } for x in casas]", "Exception(\"Necessário prover um ID\") if not len(kwargs): raise Exception(\"Necessário prover novas informações para", "self._valida(C['id_inq'], C['id_casa'] ) query = '''UPDATE contrato SET ativo = 1 WHERE id_contrato", "instalcoes = cursor.fetchall() return [{ 'num_instalacao': x[0], 'cpf_titular': x[1] } for x in", "raise Exception(\"Necessário escolher um inquilino\") try: cursor = self.conn.cursor() self._valida(inq, casa) cursor.execute(\"\"\" INSERT", "x in contratos][0] class PagamentoDAO(DAO): def realiza_pagamento(self, id_contrato=None, dt_pag=None, dt_venc=None, deposito=False, commit=False, rollback=False):", "def todas_casas(self, vazias=False): cursor = self.conn.cursor() if vazias: cursor.execute(\"\"\" SELECT c.id_casa, nome_casa, valor_aluguel_casa,", "from casa JOIN contrato ON contrato.id_casa= casa.id_casa WHERE ativo ) GROUP BY c.id_casa;", "x[0], 'nome_casa': x[1], 'valor_aluguel': x[2], 'agua_casa': x[3], 'num_instalacao_eletrica': x[4], 'cpf': x[5] } for", "um número de instalação\") if cpf is None: raise Exception(\"Necessário prover um número", "INSERT INTO casa(nome_casa, valor_aluguel_casa, agua_casa, num_instalacao) VALUES (?,?,?,?) \"\"\", (nome, valor_aluguel, agua, instalacao_eletrica))", "elif inativos: cursor.execute(\"\"\" select * from inquilino where id_inq not in (select DISTINCT", "instalacao_eletrica=None, commit=False, rollback=False): if nome is None: raise Exception(\"Necessário prover nome.\") if valor_aluguel", "None: raise Exception(\"Necessário prover um número de CPF\") if nome is None: raise", "cursor.lastrowid , 'dt_venc': dt_venc , 'dt_pag': dt_pag , 'deposito': deposito , 'id_contrato': id_contrato", "fim_contrato=None, casa=None, inq=None, commit=False, rollback=False): if valor is None: raise Exception(\"Necessário prover um", "cpf=None, commit=False, rollback=False): if num_instalacao is None: raise Exception(\"Necessário prover um número de", "altera_casa(self, id=None, commit=False, rollback=False, **kwargs): if id is None: raise Exception(\"Necessário prover um", "NULL PRIMARY KEY, dt_venc VARCHAR(23) NOT NULL, dt_pag VARCHAR(23), deposito INTEGER NOT NULL,", "is None: raise Exception(\"Necessário escolher uma casa\") if inq is None: raise Exception(\"Necessário", "o contrato\") if dia_vencimento is None: raise Exception(\"Necessário prover uma data de vencimento\")", "e: if rollback: self.conn.rollback() return None def todas_casas(self, vazias=False): cursor = self.conn.cursor() if", "= ?''' # return None try: cursor = self.conn.cursor() cursor.execute(query, tuple((kwargs[k] for k", "id_contrato = ?; \"\"\", tuple([id])) contratos = cursor.fetchall() return [{ 'id_contrato': x[0], 'valor':", "'dt_fim_contrato': x[3], 'dia_venc_aluguel': x[4], 'id_casa': x[5], 'id_inq': x[6] } for x in contratos][0]", "id): cursor = self.conn.cursor() cursor.execute(\"\"\" SELECT * FROM contrato WHERE id_contrato = ?;", "i.num_instalacao, cpf_titular FROM casa c LEFT JOIN instalacao_eletrica i ON 
c.num_instalacao = i.num_instalacao;", "config def make_connection(): return sqlite3.connect(config.DATABASE_URL) class InquilinoException(Exception): ... class CasaException(Exception): ... class DAO():", "NULL PRIMARY KEY, cpf_inq VARCHAR(11) NOT NULL UNIQUE, nome_inq VARCHAR(40) NOT NULL, rg_inq", "} for x in casas] def altera_casa(self, id=None, commit=False, rollback=False, **kwargs): if id", "\"\"\") else: cursor.execute(\"\"\" SELECT c.id_casa, nome_casa, valor_aluguel_casa, agua_casa, i.num_instalacao, cpf_titular FROM casa c", "'num_instalacao': x[0], 'cpf_titular': x[1] } for x in instalacoes] class Inquilino_DAO(DAO): def adiciona_inquilino(self,", "is None: raise Exception(\"Necessário prover uma data de vencimento\") if casa is None:", "SELECT * FROM contrato WHERE id_contrato = ?; \"\"\", tuple([id])) contratos = cursor.fetchall()", "'id_casa': x[5], 'id_inq': x[6] } for x in contratos][0] class PagamentoDAO(DAO): def realiza_pagamento(self,", "x[1], 'valor_aluguel': x[2], 'agua_casa': x[3], 'num_instalacao_eletrica': x[4], 'cpf': x[5] } for x in", "where id_inq in (select DISTINCT id_inq from contrato where ativo); \"\"\") elif inativos:", "todos_contratos(self): cursor = self.conn.cursor() cursor.execute(\"\"\" SELECT * FROM contrato; \"\"\") contratos = cursor.fetchall()", "cpf_titular FROM casa c LEFT JOIN instalacao_eletrica i ON c.num_instalacao = i.num_instalacao WHERE", "x[5], 'id_inq': x[6] } for x in contratos][0] class PagamentoDAO(DAO): def realiza_pagamento(self, id_contrato=None,", "aluguel.\") try: cursor = self.conn.cursor() cursor.execute(\"\"\" INSERT INTO casa(nome_casa, valor_aluguel_casa, agua_casa, num_instalacao) VALUES", ", 'deposito': deposito , 'id_contrato': id_contrato } except sqlite3.Error as e: if rollback:", "x[0], 'cpf_inq': x[1], 'nome_inq': x[2], 'rg_inq': x[3] } for x in inquilinos] def", "WHERE c.id_casa NOT IN ( SELECT casa.id_casa from casa JOIN contrato ON contrato.id_casa=", "'id_casa': casa, 'id_inq': inq } except sqlite3.Error as e: if rollback: self.conn.rollback() return", "except sqlite3.Error as e: if rollback: self.conn.rollback() def todas_instalacoes(self): cursor = self.conn.cursor() cursor.execute(\"\"\"", "vazias=False): cursor = self.conn.cursor() if vazias: cursor.execute(\"\"\" SELECT c.id_casa, nome_casa, valor_aluguel_casa, agua_casa, i.num_instalacao,", "None: raise Exception(\"Necessário prover uma data de vencimento\") if dt_pag is None: raise", "Exception(\"Necessário prover novas informações para o Inquilino\") query = f'''UPDATE inquilino SET {',", "id=None, valor=None, commit=False, rollback=False): if id is None: raise Exception(\"Necessário prover um ID\")", "IF NOT EXISTS instalacao_eletrica ( num_instalacao VARCHAR(20) NOT NULL PRIMARY KEY, cpf_titular VARCHAR(11)", "'cpf': x[5] } for x in casas] def altera_casa(self, id=None, commit=False, rollback=False, **kwargs):", "id_casa=None): c = Contrato_DAO(make_connection()) if id_inq and id_inq in [x['id_inq'] for x in", "SELECT c.id_casa, nome_casa, valor_aluguel_casa, agua_casa, i.num_instalacao, cpf_titular FROM casa c LEFT JOIN instalacao_eletrica", "id_inq INTEGER NOT NULL ); CREATE TABLE IF NOT EXISTS pagamento( id_pag INTEGER", "?' 
for key in kwargs.keys()])} WHERE id_inq = ?''' try: cursor = self.conn.cursor()", "= ?''' try: cursor = self.conn.cursor() cursor.execute(query, (id, )) if commit: self.conn.commit() except", "prover um número de CPF\") try: cursor = self.conn.cursor() cursor.execute(\"\"\" INSERT INTO instalacao_eletrica", "contratos] def altera_valor_contrato(self, id=None, valor=None, commit=False, rollback=False): if id is None: raise Exception(\"Necessário", "contrato(valor, ativo, dt_fim_contrato, dia_venc_aluguel, id_casa, id_inq) VALUES (?,?,?,?,?,?) \"\"\", (valor, ativo,fim_contrato, dia_vencimento, casa,", "casa is None: raise Exception(\"Necessário escolher uma casa\") if inq is None: raise", "SET valor = ? WHERE id_contrato = ?''' print(query) try: cursor = self.conn.cursor()", "f'''UPDATE contrato SET valor = ? WHERE id_contrato = ?''' print(query) try: cursor", "in inquilinos] def altera_inquilino(self, id=None, commit=False, rollback=False, **kwargs): if id is None: raise", "raise Exception(\"Necessário prover um ID\") query = '''UPDATE contrato SET ativo = 0", "raise InquilinoException() if id_casa and id_casa in [x['id_casa'] for x in c.todos_contratos() if", "valor_aluguel_casa, agua_casa, num_instalacao) VALUES (?,?,?,?) \"\"\", (nome, valor_aluguel, agua, instalacao_eletrica)) if commit: self.conn.commit()", "inquilino( id_inq INTEGER NOT NULL PRIMARY KEY, cpf_inq VARCHAR(11) NOT NULL UNIQUE, nome_inq", "x[2], 'dt_fim_contrato': x[3], 'dia_venc_aluguel': x[4], 'id_casa': x[5], 'id_inq': x[6] } for x in", "'' } = ?\" for key in kwargs.keys()])} WHERE id_casa = ?''' #", "número de CPF\") try: cursor = self.conn.cursor() cursor.execute(\"\"\" INSERT INTO instalacao_eletrica VALUES (?,", "commit: self.conn.commit() except sqlite3.Error as e: if rollback: self.conn.rollback() def ativa_contrato(self, id=None, commit=False,", "? ''' # return None try: cursor = self.conn.cursor() cursor.execute(query, (cpf, num_instalacao)) if", "x[3], 'dia_venc_aluguel': x[4], 'id_casa': x[5], 'id_inq': x[6] } for x in contratos][0] class", "dt_venc is None: raise Exception(\"Necessário prover uma data de vencimento\") if dt_pag is", "try: cursor = self.conn.cursor() cursor.execute(\"\"\" INSERT INTO casa(nome_casa, valor_aluguel_casa, agua_casa, num_instalacao) VALUES (?,?,?,?)", "* from inquilino where id_inq not in (select DISTINCT id_inq from contrato where", "instalacao_eletrica SET cpf_titular = ? WHERE num_instalacao = ? ''' # return None", "agua, instalacao_eletrica)) if commit: self.conn.commit() return { 'id_casa': cursor.lastrowid, 'nome_casa': nome, 'valor_aluguel': valor_aluguel,", "pagamentos] def start_db(conn): cursor = conn.cursor() cursor.executescript(\"\"\" CREATE TABLE IF NOT EXISTS instalacao_eletrica", "CREATE TABLE IF NOT EXISTS contrato( id_contrato INTEGER NOT NULL PRIMARY KEY, valor", "id_casa INTEGER NOT NULL, id_inq INTEGER NOT NULL ); CREATE TABLE IF NOT", "NULL, dt_fim_contrato DATE NOT NULL, dia_venc_aluguel INTEGER NOT NULL, id_casa INTEGER NOT NULL,", "Exception(\"Necessário prover um valor de aluguel para o contrato\") if dia_vencimento is None:", "VALUES (?,?,?,?,?,?) \"\"\", (valor, ativo,fim_contrato, dia_vencimento, casa, inq)) if commit: self.conn.commit() return {", "'id_inq': x[0], 'cpf_inq': x[1], 'nome_inq': x[2], 'rg_inq': x[3] } for x in inquilinos]", "x[3] , 'id_contrato': x[4] } for x in pagamentos] def todos_pagamentos_contrato(self, id_contrato): cursor", "f'''UPDATE inquilino SET {', '.join([f'{key}_inq = ?' 
for key in kwargs.keys()])} WHERE id_inq", "rollback: self.conn.rollback() return None def todos_pagamentos(self): cursor = self.conn.cursor() cursor.execute(\"\"\" SELECT * FROM", "ativo = 1 WHERE id_contrato = ?''' try: cursor = self.conn.cursor() cursor.execute(query, (id,))", "informações para o Inquilino\") query = f'''UPDATE casa SET {', '.join([f\"{key}{'_casa' if key", "instalacao_eletrica i ON c.num_instalacao = i.num_instalacao; \"\"\") casas = cursor.fetchall() return [{ 'id_casa':", "prover uma data de vencimento\") if dt_pag is None: raise Exception(\"Necessário prover uma", "(?,?,?,?,?,?) \"\"\", (valor, ativo,fim_contrato, dia_vencimento, casa, inq)) if commit: self.conn.commit() return { 'id_contrato':", "pagamento( id_pag INTEGER NOT NULL PRIMARY KEY, dt_venc VARCHAR(23) NOT NULL, dt_pag VARCHAR(23),", "return None def _valida(self, id_inq=None, id_casa=None): c = Contrato_DAO(make_connection()) if id_inq and id_inq", "Inquilino\") query = f'''UPDATE inquilino SET {', '.join([f'{key}_inq = ?' for key in", "NOT NULL PRIMARY KEY, dt_venc VARCHAR(23) NOT NULL, dt_pag VARCHAR(23), deposito INTEGER NOT", "num_instalacao) VALUES (?,?,?,?) \"\"\", (nome, valor_aluguel, agua, instalacao_eletrica)) if commit: self.conn.commit() return {", "= self.conn.cursor() cursor.execute(\"\"\" INSERT INTO casa(nome_casa, valor_aluguel_casa, agua_casa, num_instalacao) VALUES (?,?,?,?) \"\"\", (nome,", "prover uma data de pagamento\") try: cursor = self.conn.cursor() cursor.execute(\"\"\" INSERT INTO pagamento(dt_venc,", "} for x in pagamentos] def start_db(conn): cursor = conn.cursor() cursor.executescript(\"\"\" CREATE TABLE", "contratos = cursor.fetchall() return [{ 'id_contrato': x[0], 'valor': x[1], 'ativo': x[2], 'dt_fim_contrato': x[3],", "ativos: cursor.execute(\"\"\" select * from inquilino where id_inq in (select DISTINCT id_inq from", "de vencimento\") if casa is None: raise Exception(\"Necessário escolher uma casa\") if inq", "cursor = self.conn.cursor() if vazias: cursor.execute(\"\"\" SELECT c.id_casa, nome_casa, valor_aluguel_casa, agua_casa, i.num_instalacao, cpf_titular", "WHERE id_contrato = ?''' try: cursor = self.conn.cursor() cursor.execute(query, (id, )) if commit:", "rollback=False): if id is None: raise Exception(\"Necessário prover um ID\") C = self.get_contrato(id)", "dt_pag=None, dt_venc=None, deposito=False, commit=False, rollback=False): if id_contrato is None: raise Exception(\"Necessário prover um", "try: cursor = self.conn.cursor() cursor.execute(query, tuple((kwargs[k] for k in kwargs.keys())) + tuple([id])) if", "UNIQUE, nome_inq VARCHAR(40) NOT NULL, rg_inq VARCHAR(10) NOT NULL ); CREATE TABLE IF", "contrato( id_contrato INTEGER NOT NULL PRIMARY KEY, valor REAL NOT NULL, ativo INTEGER", "ID\") if valor is None: raise Exception(\"Necessário prover um valor\") query = f'''UPDATE", "NULL, rg_inq VARCHAR(10) NOT NULL ); CREATE TABLE IF NOT EXISTS contrato( id_contrato", "is None: raise Exception(\"Necessário prover um ID\") C = self.get_contrato(id) self._valida(C['id_inq'], C['id_casa'] )", "i ON c.num_instalacao = i.num_instalacao WHERE c.id_casa NOT IN ( SELECT casa.id_casa from", "ON contrato.id_casa= casa.id_casa WHERE ativo ) GROUP BY c.id_casa; \"\"\") else: cursor.execute(\"\"\" SELECT", "dt_pag, deposito, id_contrato)) if commit: self.conn.commit() return { 'id_pag': cursor.lastrowid , 'dt_venc': dt_venc", "self.conn.commit() except sqlite3.Error as e: if rollback: self.conn.rollback() def inativa_contrato(self, id=None, commit=False, rollback=False):", 
"SELECT * FROM pagamento; \"\"\") pagamentos = cursor.fetchall() return [{ 'id_pag': x[0] ,", "**kwargs): if id is None: raise Exception(\"Necessário prover um ID\") if not len(kwargs):", "Exception(\"Conflito\") elif ativos: cursor.execute(\"\"\" select * from inquilino where id_inq in (select DISTINCT", "e: if rollback: self.conn.rollback() class Instalacao_Eletrica_DAO(DAO): def adiciona_instalacao_eletrica(self, num_instalacao=None, cpf=None, commit=False, rollback=False): if", "inativos: cursor.execute(\"\"\" select * from inquilino where id_inq not in (select DISTINCT id_inq", "(cpf, nome, rg)) if commit: self.conn.commit() return { 'id_inq': cursor.lastrowid, 'cpf_inq': cpf, 'nome_inq':", "VARCHAR(20) NOT NULL PRIMARY KEY, cpf_titular VARCHAR(11) NOT NULL UNIQUE ); CREATE TABLE", "dt_pag, deposito, id_contrato) VALUES (?, ?, ?, ?) \"\"\", (dt_venc, dt_pag, deposito, id_contrato))", "'id_casa': x[0], 'nome_casa': x[1], 'valor_aluguel': x[2], 'agua_casa': x[3], 'num_instalacao_eletrica': x[4], 'cpf': x[5] }", "cursor.execute(query, (valor, id)) if commit: self.conn.commit() except sqlite3.Error as e: if rollback: self.conn.rollback()", "* FROM contrato WHERE id_contrato = ?; \"\"\", tuple([id])) contratos = cursor.fetchall() return", "NULL, id_inq INTEGER NOT NULL ); CREATE TABLE IF NOT EXISTS pagamento( id_pag", "id_contrato)) if commit: self.conn.commit() return { 'id_pag': cursor.lastrowid , 'dt_venc': dt_venc , 'dt_pag':", "INTEGER NOT NULL PRIMARY KEY, nome_casa INTEGER NOT NULL, valor_aluguel_casa INTEGER NOT NULL,", "x[5], 'id_inq': x[6] } for x in contratos] def altera_valor_contrato(self, id=None, valor=None, commit=False,", "= cursor.fetchall() return [{ 'id_contrato': x[0], 'valor': x[1], 'ativo': x[2], 'dt_fim_contrato': x[3], 'dia_venc_aluguel':", "contratos][0] class PagamentoDAO(DAO): def realiza_pagamento(self, id_contrato=None, dt_pag=None, dt_venc=None, deposito=False, commit=False, rollback=False): if id_contrato", "valor_aluguel, 'agua_casa': agua, 'num_instalacao_eletrica': instalacao_eletrica } except sqlite3.Error as e: if rollback: self.conn.rollback()", "{ 'id_casa': cursor.lastrowid, 'nome_casa': nome, 'valor_aluguel': valor_aluguel, 'agua_casa': agua, 'num_instalacao_eletrica': instalacao_eletrica } except", "(?, ?, ?, ?) \"\"\", (dt_venc, dt_pag, deposito, id_contrato)) if commit: self.conn.commit() return" ]
[ "installation functionality\"\"\" __author__ = \"<NAME>\" __copyright__ = \"Copyright 2020, National Renewable Energy Laboratory\"", "<reponame>ptrbortolotti/WISDEM \"\"\"Initialize cable installation functionality\"\"\" __author__ = \"<NAME>\" __copyright__ = \"Copyright 2020, National", "= \"<NAME>\" __copyright__ = \"Copyright 2020, National Renewable Energy Laboratory\" __maintainer__ = \"<NAME>\"", "\"<NAME>\" __copyright__ = \"Copyright 2020, National Renewable Energy Laboratory\" __maintainer__ = \"<NAME>\" __email__", "= \"<EMAIL>\" from .array import ArrayCableInstallation from .common import SimpleCable from .export import", "__author__ = \"<NAME>\" __copyright__ = \"Copyright 2020, National Renewable Energy Laboratory\" __maintainer__ =", "National Renewable Energy Laboratory\" __maintainer__ = \"<NAME>\" __email__ = \"<EMAIL>\" from .array import", "2020, National Renewable Energy Laboratory\" __maintainer__ = \"<NAME>\" __email__ = \"<EMAIL>\" from .array", "= \"<NAME>\" __email__ = \"<EMAIL>\" from .array import ArrayCableInstallation from .common import SimpleCable", "= \"Copyright 2020, National Renewable Energy Laboratory\" __maintainer__ = \"<NAME>\" __email__ = \"<EMAIL>\"", "Renewable Energy Laboratory\" __maintainer__ = \"<NAME>\" __email__ = \"<EMAIL>\" from .array import ArrayCableInstallation", "\"<NAME>\" __email__ = \"<EMAIL>\" from .array import ArrayCableInstallation from .common import SimpleCable from", "__email__ = \"<EMAIL>\" from .array import ArrayCableInstallation from .common import SimpleCable from .export", "Energy Laboratory\" __maintainer__ = \"<NAME>\" __email__ = \"<EMAIL>\" from .array import ArrayCableInstallation from", "cable installation functionality\"\"\" __author__ = \"<NAME>\" __copyright__ = \"Copyright 2020, National Renewable Energy", "\"Copyright 2020, National Renewable Energy Laboratory\" __maintainer__ = \"<NAME>\" __email__ = \"<EMAIL>\" from", "Laboratory\" __maintainer__ = \"<NAME>\" __email__ = \"<EMAIL>\" from .array import ArrayCableInstallation from .common", "\"<EMAIL>\" from .array import ArrayCableInstallation from .common import SimpleCable from .export import ExportCableInstallation", "\"\"\"Initialize cable installation functionality\"\"\" __author__ = \"<NAME>\" __copyright__ = \"Copyright 2020, National Renewable", "functionality\"\"\" __author__ = \"<NAME>\" __copyright__ = \"Copyright 2020, National Renewable Energy Laboratory\" __maintainer__", "__maintainer__ = \"<NAME>\" __email__ = \"<EMAIL>\" from .array import ArrayCableInstallation from .common import", "__copyright__ = \"Copyright 2020, National Renewable Energy Laboratory\" __maintainer__ = \"<NAME>\" __email__ =" ]
[ "= 'de' msgs = [_('German'), _('English')] msgs = translate_for_user(user, *msgs) assert msgs ==", "mail module. \"\"\" from django.contrib.auth import get_user_model from django.utils.translation import ugettext_lazy as _", "user.profile.language = 'de' msgs = [_('German'), _('English')] msgs = translate_for_user(user, *msgs) assert msgs", "for mail module. \"\"\" from django.contrib.auth import get_user_model from django.utils.translation import ugettext_lazy as", "User = get_user_model() user = User.objects.get(username='test') user.profile.language = 'de' msgs = [_('German'), _('English')]", "msgs = [_('German'), _('English')] msgs = translate_for_user(user, *msgs) assert msgs == ['Deutsch', 'Englisch']", "= get_user_model() user = User.objects.get(username='test') user.profile.language = 'de' msgs = [_('German'), _('English')] msgs", "class TestTransUser(object): def test(self): User = get_user_model() user = User.objects.get(username='test') user.profile.language = 'de'", "django.utils.translation import ugettext_lazy as _ from ..mail import translate_for_user class TestTransUser(object): def test(self):", "\"\"\" from django.contrib.auth import get_user_model from django.utils.translation import ugettext_lazy as _ from ..mail", "from django.contrib.auth import get_user_model from django.utils.translation import ugettext_lazy as _ from ..mail import", "<reponame>mirzazulfan/nsupdate.info \"\"\" Tests for mail module. \"\"\" from django.contrib.auth import get_user_model from django.utils.translation", "_ from ..mail import translate_for_user class TestTransUser(object): def test(self): User = get_user_model() user", "..mail import translate_for_user class TestTransUser(object): def test(self): User = get_user_model() user = User.objects.get(username='test')", "def test(self): User = get_user_model() user = User.objects.get(username='test') user.profile.language = 'de' msgs =", "= User.objects.get(username='test') user.profile.language = 'de' msgs = [_('German'), _('English')] msgs = translate_for_user(user, *msgs)", "'de' msgs = [_('German'), _('English')] msgs = translate_for_user(user, *msgs) assert msgs == ['Deutsch',", "from ..mail import translate_for_user class TestTransUser(object): def test(self): User = get_user_model() user =", "\"\"\" Tests for mail module. \"\"\" from django.contrib.auth import get_user_model from django.utils.translation import", "from django.utils.translation import ugettext_lazy as _ from ..mail import translate_for_user class TestTransUser(object): def", "import translate_for_user class TestTransUser(object): def test(self): User = get_user_model() user = User.objects.get(username='test') user.profile.language", "django.contrib.auth import get_user_model from django.utils.translation import ugettext_lazy as _ from ..mail import translate_for_user", "user = User.objects.get(username='test') user.profile.language = 'de' msgs = [_('German'), _('English')] msgs = translate_for_user(user,", "User.objects.get(username='test') user.profile.language = 'de' msgs = [_('German'), _('English')] msgs = translate_for_user(user, *msgs) assert", "module. \"\"\" from django.contrib.auth import get_user_model from django.utils.translation import ugettext_lazy as _ from", "as _ from ..mail import translate_for_user class TestTransUser(object): def test(self): User = get_user_model()", "Tests for mail module. 
\"\"\" from django.contrib.auth import get_user_model from django.utils.translation import ugettext_lazy", "TestTransUser(object): def test(self): User = get_user_model() user = User.objects.get(username='test') user.profile.language = 'de' msgs", "get_user_model() user = User.objects.get(username='test') user.profile.language = 'de' msgs = [_('German'), _('English')] msgs =", "translate_for_user class TestTransUser(object): def test(self): User = get_user_model() user = User.objects.get(username='test') user.profile.language =", "test(self): User = get_user_model() user = User.objects.get(username='test') user.profile.language = 'de' msgs = [_('German'),", "import ugettext_lazy as _ from ..mail import translate_for_user class TestTransUser(object): def test(self): User", "get_user_model from django.utils.translation import ugettext_lazy as _ from ..mail import translate_for_user class TestTransUser(object):", "ugettext_lazy as _ from ..mail import translate_for_user class TestTransUser(object): def test(self): User =", "import get_user_model from django.utils.translation import ugettext_lazy as _ from ..mail import translate_for_user class" ]
[ "filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='page', full_name='basic.ListToolResponse.page', index=0, number=1, type=5, cpp_type=1, label=1, has_default_value=False,", "type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b(\"\").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), ],", "= _descriptor.Descriptor( name='ListToolRequest', full_name='basic.ListToolRequest', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='detail', full_name='basic.ListToolRequest.detail', index=0, number=1,", "name='tags', full_name='basic.ListToolRequest.tags', index=6, number=7, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b(\"\").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False,", "_reflection.GeneratedProtocolMessageType('ListToolResponseWrapper', (_message.Message,), { 'DESCRIPTOR' : _LISTTOOLRESPONSEWRAPPER, '__module__' : 'list_tool_pb2' # @@protoc_insertion_point(class_scope:basic.ListToolResponseWrapper) }) _sym_db.RegisterMessage(ListToolResponseWrapper)", "index=0, number=1, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None,", "x:x) or (lambda x:x.encode('latin1')) from google.protobuf import descriptor as _descriptor from google.protobuf import", "extension_scope=None, serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[],", "'DESCRIPTOR' : _LISTTOOLRESPONSE, '__module__' : 'list_tool_pb2' # @@protoc_insertion_point(class_scope:basic.ListToolResponse) }) _sym_db.RegisterMessage(ListToolResponse) ListToolResponseWrapper = _reflection.GeneratedProtocolMessageType('ListToolResponseWrapper',", "containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='page_size', full_name='basic.ListToolResponse.page_size', index=1, number=2, type=5, cpp_type=1, label=1,", "cpp_type=9, label=1, has_default_value=False, default_value=_b(\"\").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='onlyProduction',", "full_name='basic.ListToolResponseWrapper.data', index=3, number=4, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None,", "label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='list', full_name='basic.ListToolResponse.list',", "google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf", "_LISTTOOLRESPONSEWRAPPER = _descriptor.Descriptor( name='ListToolResponseWrapper', full_name='basic.ListToolResponseWrapper', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='code', full_name='basic.ListToolResponseWrapper.code', index=0,", "# source: 
list_tool.proto import sys _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) from", "enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='total', full_name='basic.ListToolResponse.total', index=2, number=3, type=5, cpp_type=1,", "name='ListToolRequest', full_name='basic.ListToolRequest', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='detail', full_name='basic.ListToolRequest.detail', index=0, number=1, type=8, cpp_type=7,", "full_name='basic.ListToolResponseWrapper.error', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b(\"\").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None,", "= _LISTTOOLRESPONSEWRAPPER _sym_db.RegisterFileDescriptor(DESCRIPTOR) ListToolRequest = _reflection.GeneratedProtocolMessageType('ListToolRequest', (_message.Message,), { 'DESCRIPTOR' : _LISTTOOLREQUEST, '__module__' :", "_LISTTOOLREQUEST, '__module__' : 'list_tool_pb2' # @@protoc_insertion_point(class_scope:basic.ListToolRequest) }) _sym_db.RegisterMessage(ListToolRequest) ListToolResponse = _reflection.GeneratedProtocolMessageType('ListToolResponse', (_message.Message,), {", ": 'list_tool_pb2' # @@protoc_insertion_point(class_scope:basic.ListToolResponse) }) _sym_db.RegisterMessage(ListToolResponse) ListToolResponseWrapper = _reflection.GeneratedProtocolMessageType('ListToolResponseWrapper', (_message.Message,), { 'DESCRIPTOR' :", "from google.protobuf import message as _message from google.protobuf import reflection as _reflection from", "name='codeExplain', full_name='basic.ListToolResponseWrapper.codeExplain', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b(\"\").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False,", "name='page_size', full_name='basic.ListToolResponse.page_size', index=1, number=2, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False,", "message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='list', full_name='basic.ListToolResponse.list', index=3, number=4, type=11,", "ListToolResponseWrapper = _reflection.GeneratedProtocolMessageType('ListToolResponseWrapper', (_message.Message,), { 'DESCRIPTOR' : _LISTTOOLRESPONSEWRAPPER, '__module__' : 'list_tool_pb2' # @@protoc_insertion_point(class_scope:basic.ListToolResponseWrapper)", "file=DESCRIPTOR), _descriptor.FieldDescriptor( name='tags', full_name='basic.ListToolRequest.tags', index=6, number=7, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b(\"\").decode('utf-8'), message_type=None, enum_type=None,", "or (lambda x:x.encode('latin1')) from google.protobuf import descriptor as _descriptor from google.protobuf import message", "@@protoc_insertion_point(class_scope:basic.ListToolResponse) }) _sym_db.RegisterMessage(ListToolResponse) ListToolResponseWrapper = _reflection.GeneratedProtocolMessageType('ListToolResponseWrapper', (_message.Message,), { 'DESCRIPTOR' : _LISTTOOLRESPONSEWRAPPER, '__module__' :", ") _LISTTOOLRESPONSE = _descriptor.Descriptor( name='ListToolResponse', full_name='basic.ListToolResponse', filename=None, file=DESCRIPTOR, 
containing_type=None, fields=[ _descriptor.FieldDescriptor( name='page', full_name='basic.ListToolResponse.page',", "import sys _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) from google.protobuf import descriptor", "containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='error', full_name='basic.ListToolResponseWrapper.error', index=2, number=3, type=9, cpp_type=9, label=1,", "_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) from google.protobuf import descriptor as _descriptor", "_LISTTOOLRESPONSE DESCRIPTOR.message_types_by_name['ListToolResponseWrapper'] = _LISTTOOLRESPONSEWRAPPER _sym_db.RegisterFileDescriptor(DESCRIPTOR) ListToolRequest = _reflection.GeneratedProtocolMessageType('ListToolRequest', (_message.Message,), { 'DESCRIPTOR' : _LISTTOOLREQUEST,", ", dependencies=[tool__sdk_dot_model_dot_tool_dot_tool__pb2.DESCRIPTOR,]) _LISTTOOLREQUEST = _descriptor.Descriptor( name='ListToolRequest', full_name='basic.ListToolRequest', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='detail',", "extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='tags', full_name='basic.ListToolRequest.tags', index=6, number=7, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b(\"\").decode('utf-8'),", "extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=59, serialized_end=208,", "serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='plugin', full_name='basic.ListToolRequest.plugin', index=1, number=2, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None,", "enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='category', full_name='basic.ListToolRequest.category', index=2, number=3, type=9, cpp_type=9,", "containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='onlyProduction', full_name='basic.ListToolRequest.onlyProduction', index=4, number=5, type=8, cpp_type=7, label=1,", "label=1, has_default_value=False, default_value=_b(\"\").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='error', full_name='basic.ListToolResponseWrapper.error',", "\\x01(\\t\\x12%\\n\\x04\\x64\\x61ta\\x18\\x04 \\x01(\\x0b\\x32\\x17.basic.ListToolResponseb\\x06proto3') , dependencies=[tool__sdk_dot_model_dot_tool_dot_tool__pb2.DESCRIPTOR,]) _LISTTOOLREQUEST = _descriptor.Descriptor( name='ListToolRequest', full_name='basic.ListToolRequest', filename=None, file=DESCRIPTOR, containing_type=None, fields=[", "has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='showInvisible', full_name='basic.ListToolRequest.showInvisible', index=5,", "has_default_value=False, default_value=_b(\"\").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, 
file=DESCRIPTOR), _descriptor.FieldDescriptor( name='onlyProduction', full_name='basic.ListToolRequest.onlyProduction', index=4,", "file=DESCRIPTOR), _descriptor.FieldDescriptor( name='error', full_name='basic.ListToolResponseWrapper.error', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b(\"\").decode('utf-8'), message_type=None, enum_type=None,", "name='permissions', full_name='basic.ListToolRequest.permissions', index=3, number=4, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b(\"\").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False,", "], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=304, serialized_end=418, )", "_descriptor.FieldDescriptor( name='data', full_name='basic.ListToolResponseWrapper.data', index=3, number=4, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None,", "serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=210, serialized_end=302, ) _LISTTOOLRESPONSEWRAPPER = _descriptor.Descriptor( name='ListToolResponseWrapper',", "containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='list', full_name='basic.ListToolResponse.list', index=3, number=4, type=11, cpp_type=10, label=3,", "], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=304,", "oneofs=[ ], serialized_start=59, serialized_end=208, ) _LISTTOOLRESPONSE = _descriptor.Descriptor( name='ListToolResponse', full_name='basic.ListToolResponse', filename=None, file=DESCRIPTOR, containing_type=None,", "default_value=_b(\"\").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='onlyProduction', full_name='basic.ListToolRequest.onlyProduction', index=4, number=5,", "default_value=_b(\"\").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='permissions', full_name='basic.ListToolRequest.permissions', index=3, number=4,", "index=4, number=5, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None,", "full_name='basic.ListToolRequest.tags', index=6, number=7, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b(\"\").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None,", "{ 'DESCRIPTOR' : _LISTTOOLREQUEST, '__module__' : 'list_tool_pb2' # @@protoc_insertion_point(class_scope:basic.ListToolRequest) }) _sym_db.RegisterMessage(ListToolRequest) ListToolResponse =", "-*- coding: utf-8 -*- # Generated by the protocol buffer compiler. 
serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=304, serialized_end=418, ) _LISTTOOLRESPONSE.fields_by_name['list'].message_type", "\\x01(\\x08\\x12\\x0c\\n\\x04tags\\x18\\x07 \\x01(\\t\\\"\\\\\\n\\x10ListToolResponse\\x12\\x0c\\n\\x04page\\x18\\x01 \\x01(\\x05\\x12\\x11\\n\\tpage_size\\x18\\x02 \\x01(\\x05\\x12\\r\\n\\x05total\\x18\\x03 \\x01(\\x05\\x12\\x18\\n\\x04list\\x18\\x04 \\x03(\\x0b\\x32\\n.tool.Tool\\\"r\\n\\x17ListToolResponseWrapper\\x12\\x0c\\n\\x04\\x63ode\\x18\\x01 \\x01(\\x05\\x12\\x13\\n\\x0b\\x63odeExplain\\x18\\x02 \\x01(\\t\\x12\\r\\n\\x05\\x65rror\\x18\\x03 \\x01(\\t\\x12%\\n\\x04\\x64\\x61ta\\x18\\x04 \\x01(\\x0b\\x32\\x17.basic.ListToolResponseb\\x06proto3') , dependencies=[tool__sdk_dot_model_dot_tool_dot_tool__pb2.DESCRIPTOR,]) _LISTTOOLREQUEST", "number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b(\"\").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR),", "_descriptor.Descriptor( name='ListToolRequest', full_name='basic.ListToolRequest', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='detail', full_name='basic.ListToolRequest.detail', index=0, number=1, type=8,", "EDIT! # source: list_tool.proto import sys _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))", "default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='page_size', full_name='basic.ListToolResponse.page_size', index=1, number=2,", ": 'list_tool_pb2' # @@protoc_insertion_point(class_scope:basic.ListToolRequest) }) _sym_db.RegisterMessage(ListToolRequest) ListToolResponse = _reflection.GeneratedProtocolMessageType('ListToolResponse', (_message.Message,), { 'DESCRIPTOR' :", "number=3, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR),", "\\x01(\\x05\\x12\\r\\n\\x05total\\x18\\x03 \\x01(\\x05\\x12\\x18\\n\\x04list\\x18\\x04 \\x03(\\x0b\\x32\\n.tool.Tool\\\"r\\n\\x17ListToolResponseWrapper\\x12\\x0c\\n\\x04\\x63ode\\x18\\x01 \\x01(\\x05\\x12\\x13\\n\\x0b\\x63odeExplain\\x18\\x02 \\x01(\\t\\x12\\r\\n\\x05\\x65rror\\x18\\x03 \\x01(\\t\\x12%\\n\\x04\\x64\\x61ta\\x18\\x04 \\x01(\\x0b\\x32\\x17.basic.ListToolResponseb\\x06proto3') , dependencies=[tool__sdk_dot_model_dot_tool_dot_tool__pb2.DESCRIPTOR,]) _LISTTOOLREQUEST = _descriptor.Descriptor( name='ListToolRequest',", "_message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database", "'__module__' : 'list_tool_pb2' # @@protoc_insertion_point(class_scope:basic.ListToolResponse) }) _sym_db.RegisterMessage(ListToolResponse) ListToolResponseWrapper = _reflection.GeneratedProtocolMessageType('ListToolResponseWrapper', (_message.Message,), { 'DESCRIPTOR'", "filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='detail', full_name='basic.ListToolRequest.detail', index=0, number=1, type=8, cpp_type=7, label=1, has_default_value=False,", "number=4, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b(\"\").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, 
is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR),", "# @@protoc_insertion_point(class_scope:basic.ListToolResponse) }) _sym_db.RegisterMessage(ListToolResponse) ListToolResponseWrapper = _reflection.GeneratedProtocolMessageType('ListToolResponseWrapper', (_message.Message,), { 'DESCRIPTOR' : _LISTTOOLRESPONSEWRAPPER, '__module__'", "label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='page_size', full_name='basic.ListToolResponse.page_size',", "], serialized_start=304, serialized_end=418, ) _LISTTOOLRESPONSE.fields_by_name['list'].message_type = tool__sdk_dot_model_dot_tool_dot_tool__pb2._TOOL _LISTTOOLRESPONSEWRAPPER.fields_by_name['data'].message_type = _LISTTOOLRESPONSE DESCRIPTOR.message_types_by_name['ListToolRequest'] = _LISTTOOLREQUEST", "serialized_end=208, ) _LISTTOOLRESPONSE = _descriptor.Descriptor( name='ListToolResponse', full_name='basic.ListToolResponse', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='page',", "serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='permissions', full_name='basic.ListToolRequest.permissions', index=3, number=4, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b(\"\").decode('utf-8'), message_type=None,", "file=DESCRIPTOR), _descriptor.FieldDescriptor( name='permissions', full_name='basic.ListToolRequest.permissions', index=3, number=4, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b(\"\").decode('utf-8'), message_type=None, enum_type=None,", "google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports)", "buffer compiler. DO NOT EDIT! 
# source: list_tool.proto import sys _b=sys.version_info[0]<3 and (lambda", "_descriptor.Descriptor( name='ListToolResponseWrapper', full_name='basic.ListToolResponseWrapper', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='code', full_name='basic.ListToolResponseWrapper.code', index=0, number=1, type=5,", "google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() from tool_sdk.model.tool import", "_sym_db.RegisterMessage(ListToolResponse) ListToolResponseWrapper = _reflection.GeneratedProtocolMessageType('ListToolResponseWrapper', (_message.Message,), { 'DESCRIPTOR' : _LISTTOOLRESPONSEWRAPPER, '__module__' : 'list_tool_pb2' #", "message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='plugin', full_name='basic.ListToolRequest.plugin', index=1, number=2, type=8,", "extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=210, serialized_end=302,", "message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='data', full_name='basic.ListToolResponseWrapper.data', index=3, number=4, type=11,", "name='detail', full_name='basic.ListToolRequest.detail', index=0, number=1, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False,", "], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=59,", "index=0, number=1, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None,", "_reflection.GeneratedProtocolMessageType('ListToolResponse', (_message.Message,), { 'DESCRIPTOR' : _LISTTOOLRESPONSE, '__module__' : 'list_tool_pb2' # @@protoc_insertion_point(class_scope:basic.ListToolResponse) }) _sym_db.RegisterMessage(ListToolResponse)", "name='showInvisible', full_name='basic.ListToolRequest.showInvisible', index=5, number=6, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False,", "full_name='basic.ListToolResponse.list', index=3, number=4, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None,", "name='onlyProduction', full_name='basic.ListToolRequest.onlyProduction', index=4, number=5, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False,", "serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=304, serialized_end=418, ) _LISTTOOLRESPONSE.fields_by_name['list'].message_type = tool__sdk_dot_model_dot_tool_dot_tool__pb2._TOOL _LISTTOOLRESPONSEWRAPPER.fields_by_name['data'].message_type", "cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, 
serialized_options=None, file=DESCRIPTOR), ], extensions=[", "containing_type=None, fields=[ _descriptor.FieldDescriptor( name='page', full_name='basic.ListToolResponse.page', index=0, number=1, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None,", ": _LISTTOOLREQUEST, '__module__' : 'list_tool_pb2' # @@protoc_insertion_point(class_scope:basic.ListToolRequest) }) _sym_db.RegisterMessage(ListToolRequest) ListToolResponse = _reflection.GeneratedProtocolMessageType('ListToolResponse', (_message.Message,),", "message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='onlyProduction', full_name='basic.ListToolRequest.onlyProduction', index=4, number=5, type=8,", "default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[", "file=DESCRIPTOR), _descriptor.FieldDescriptor( name='list', full_name='basic.ListToolResponse.list', index=3, number=4, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None,", "_descriptor.FieldDescriptor( name='tags', full_name='basic.ListToolRequest.tags', index=6, number=7, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b(\"\").decode('utf-8'), message_type=None, enum_type=None, containing_type=None,", "has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='total', full_name='basic.ListToolResponse.total', index=2,", "_symbol_database.Default() from tool_sdk.model.tool import tool_pb2 as tool__sdk_dot_model_dot_tool_dot_tool__pb2 DESCRIPTOR = _descriptor.FileDescriptor( name='list_tool.proto', package='basic', syntax='proto3',", "is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=59, serialized_end=208, ) _LISTTOOLRESPONSE = _descriptor.Descriptor( name='ListToolResponse', full_name='basic.ListToolResponse',", "(lambda x:x.encode('latin1')) from google.protobuf import descriptor as _descriptor from google.protobuf import message as", "], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=59, serialized_end=208, ) _LISTTOOLRESPONSE = _descriptor.Descriptor(", "extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='total', full_name='basic.ListToolResponse.total', index=2, number=3, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0,", "syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=304, serialized_end=418, ) _LISTTOOLRESPONSE.fields_by_name['list'].message_type = tool__sdk_dot_model_dot_tool_dot_tool__pb2._TOOL _LISTTOOLRESPONSEWRAPPER.fields_by_name['data'].message_type = _LISTTOOLRESPONSE", "message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='showInvisible', full_name='basic.ListToolRequest.showInvisible', index=5, number=6, type=8,", "containing_type=None, fields=[ _descriptor.FieldDescriptor( name='code', full_name='basic.ListToolResponseWrapper.code', index=0, number=1, type=5, cpp_type=1, label=1, has_default_value=False, 
default_value=0, message_type=None,", "\\x01(\\t\\x12\\r\\n\\x05\\x65rror\\x18\\x03 \\x01(\\t\\x12%\\n\\x04\\x64\\x61ta\\x18\\x04 \\x01(\\x0b\\x32\\x17.basic.ListToolResponseb\\x06proto3') , dependencies=[tool__sdk_dot_model_dot_tool_dot_tool__pb2.DESCRIPTOR,]) _LISTTOOLREQUEST = _descriptor.Descriptor( name='ListToolRequest', full_name='basic.ListToolRequest', filename=None, file=DESCRIPTOR, containing_type=None,", "enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='showInvisible', full_name='basic.ListToolRequest.showInvisible', index=5, number=6, type=8, cpp_type=7,", "number=1, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR),", "import tool_pb2 as tool__sdk_dot_model_dot_tool_dot_tool__pb2 DESCRIPTOR = _descriptor.FileDescriptor( name='list_tool.proto', package='basic', syntax='proto3', serialized_options=None, serialized_pb=_b('\\n\\x0flist_tool.proto\\x12\\x05\\x62\\x61sic\\x1a\\x1etool_sdk/model/tool/tool.proto\\\"\\x95\\x01\\n\\x0fListToolRequest\\x12\\x0e\\n\\x06\\x64\\x65tail\\x18\\x01 \\x01(\\x08\\x12\\x0e\\n\\x06plugin\\x18\\x02", "message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='error', full_name='basic.ListToolResponseWrapper.error', index=2, number=3, type=9,", "serialized_start=304, serialized_end=418, ) _LISTTOOLRESPONSE.fields_by_name['list'].message_type = tool__sdk_dot_model_dot_tool_dot_tool__pb2._TOOL _LISTTOOLRESPONSEWRAPPER.fields_by_name['data'].message_type = _LISTTOOLRESPONSE DESCRIPTOR.message_types_by_name['ListToolRequest'] = _LISTTOOLREQUEST DESCRIPTOR.message_types_by_name['ListToolResponse']", "list_tool.proto import sys _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) from google.protobuf import", "extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='list', full_name='basic.ListToolResponse.list', index=3, number=4, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[],", "name='data', full_name='basic.ListToolResponseWrapper.data', index=3, number=4, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False,", "DESCRIPTOR = _descriptor.FileDescriptor( name='list_tool.proto', package='basic', syntax='proto3', serialized_options=None, serialized_pb=_b('\\n\\x0flist_tool.proto\\x12\\x05\\x62\\x61sic\\x1a\\x1etool_sdk/model/tool/tool.proto\\\"\\x95\\x01\\n\\x0fListToolRequest\\x12\\x0e\\n\\x06\\x64\\x65tail\\x18\\x01 \\x01(\\x08\\x12\\x0e\\n\\x06plugin\\x18\\x02 \\x01(\\x08\\x12\\x10\\n\\x08\\x63\\x61tegory\\x18\\x03 \\x01(\\t\\x12\\x13\\n\\x0bpermissions\\x18\\x04 \\x01(\\t\\x12\\x16\\n\\x0eonlyProduction\\x18\\x05 \\x01(\\x08\\x12\\x15\\n\\rshowInvisible\\x18\\x06", "cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='total',", "], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=210, serialized_end=302, ) _LISTTOOLRESPONSEWRAPPER = 
_descriptor.Descriptor(", "extension_ranges=[], oneofs=[ ], serialized_start=304, serialized_end=418, ) _LISTTOOLRESPONSE.fields_by_name['list'].message_type = tool__sdk_dot_model_dot_tool_dot_tool__pb2._TOOL _LISTTOOLRESPONSEWRAPPER.fields_by_name['data'].message_type = _LISTTOOLRESPONSE DESCRIPTOR.message_types_by_name['ListToolRequest']", "cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='category',", "{ 'DESCRIPTOR' : _LISTTOOLRESPONSE, '__module__' : 'list_tool_pb2' # @@protoc_insertion_point(class_scope:basic.ListToolResponse) }) _sym_db.RegisterMessage(ListToolResponse) ListToolResponseWrapper =", "extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='category', full_name='basic.ListToolRequest.category', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b(\"\").decode('utf-8'),", "label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='plugin', full_name='basic.ListToolRequest.plugin',", "_LISTTOOLREQUEST DESCRIPTOR.message_types_by_name['ListToolResponse'] = _LISTTOOLRESPONSE DESCRIPTOR.message_types_by_name['ListToolResponseWrapper'] = _LISTTOOLRESPONSEWRAPPER _sym_db.RegisterFileDescriptor(DESCRIPTOR) ListToolRequest = _reflection.GeneratedProtocolMessageType('ListToolRequest', (_message.Message,), {", "is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='showInvisible', full_name='basic.ListToolRequest.showInvisible', index=5, number=6, type=8, cpp_type=7, label=1, has_default_value=False,", "], serialized_start=210, serialized_end=302, ) _LISTTOOLRESPONSEWRAPPER = _descriptor.Descriptor( name='ListToolResponseWrapper', full_name='basic.ListToolResponseWrapper', filename=None, file=DESCRIPTOR, containing_type=None, fields=[", "_descriptor.FieldDescriptor( name='code', full_name='basic.ListToolResponseWrapper.code', index=0, number=1, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None,", "], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=59, serialized_end=208, )", "dependencies=[tool__sdk_dot_model_dot_tool_dot_tool__pb2.DESCRIPTOR,]) _LISTTOOLREQUEST = _descriptor.Descriptor( name='ListToolRequest', full_name='basic.ListToolRequest', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='detail', full_name='basic.ListToolRequest.detail',", "@@protoc_insertion_point(class_scope:basic.ListToolRequest) }) _sym_db.RegisterMessage(ListToolRequest) ListToolResponse = _reflection.GeneratedProtocolMessageType('ListToolResponse', (_message.Message,), { 'DESCRIPTOR' : _LISTTOOLRESPONSE, '__module__' :", "enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='onlyProduction', full_name='basic.ListToolRequest.onlyProduction', index=4, number=5, type=8, cpp_type=7,", "extension_ranges=[], oneofs=[ ], serialized_start=59, serialized_end=208, ) _LISTTOOLRESPONSE = 
_descriptor.Descriptor( name='ListToolResponse', full_name='basic.ListToolResponse', filename=None, file=DESCRIPTOR,", "_LISTTOOLRESPONSEWRAPPER.fields_by_name['data'].message_type = _LISTTOOLRESPONSE DESCRIPTOR.message_types_by_name['ListToolRequest'] = _LISTTOOLREQUEST DESCRIPTOR.message_types_by_name['ListToolResponse'] = _LISTTOOLRESPONSE DESCRIPTOR.message_types_by_name['ListToolResponseWrapper'] = _LISTTOOLRESPONSEWRAPPER _sym_db.RegisterFileDescriptor(DESCRIPTOR)", "number=4, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR),", "oneofs=[ ], serialized_start=304, serialized_end=418, ) _LISTTOOLRESPONSE.fields_by_name['list'].message_type = tool__sdk_dot_model_dot_tool_dot_tool__pb2._TOOL _LISTTOOLRESPONSEWRAPPER.fields_by_name['data'].message_type = _LISTTOOLRESPONSE DESCRIPTOR.message_types_by_name['ListToolRequest'] =", "number=5, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR),", "serialized_end=418, ) _LISTTOOLRESPONSE.fields_by_name['list'].message_type = tool__sdk_dot_model_dot_tool_dot_tool__pb2._TOOL _LISTTOOLRESPONSEWRAPPER.fields_by_name['data'].message_type = _LISTTOOLRESPONSE DESCRIPTOR.message_types_by_name['ListToolRequest'] = _LISTTOOLREQUEST DESCRIPTOR.message_types_by_name['ListToolResponse'] =", "], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=210, serialized_end=302, )", "type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), ],", "Generated by the protocol buffer compiler. DO NOT EDIT! 
# source: list_tool.proto import", "has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='plugin', full_name='basic.ListToolRequest.plugin', index=1,", "google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf", "extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='error', full_name='basic.ListToolResponseWrapper.error', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b(\"\").decode('utf-8'),", "\\x01(\\t\\\"\\\\\\n\\x10ListToolResponse\\x12\\x0c\\n\\x04page\\x18\\x01 \\x01(\\x05\\x12\\x11\\n\\tpage_size\\x18\\x02 \\x01(\\x05\\x12\\r\\n\\x05total\\x18\\x03 \\x01(\\x05\\x12\\x18\\n\\x04list\\x18\\x04 \\x03(\\x0b\\x32\\n.tool.Tool\\\"r\\n\\x17ListToolResponseWrapper\\x12\\x0c\\n\\x04\\x63ode\\x18\\x01 \\x01(\\x05\\x12\\x13\\n\\x0b\\x63odeExplain\\x18\\x02 \\x01(\\t\\x12\\r\\n\\x05\\x65rror\\x18\\x03 \\x01(\\t\\x12%\\n\\x04\\x64\\x61ta\\x18\\x04 \\x01(\\x0b\\x32\\x17.basic.ListToolResponseb\\x06proto3') , dependencies=[tool__sdk_dot_model_dot_tool_dot_tool__pb2.DESCRIPTOR,]) _LISTTOOLREQUEST =", "symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() from tool_sdk.model.tool import tool_pb2 as", "sys _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) from google.protobuf import descriptor as", "from tool_sdk.model.tool import tool_pb2 as tool__sdk_dot_model_dot_tool_dot_tool__pb2 DESCRIPTOR = _descriptor.FileDescriptor( name='list_tool.proto', package='basic', syntax='proto3', serialized_options=None,", "'__module__' : 'list_tool_pb2' # @@protoc_insertion_point(class_scope:basic.ListToolRequest) }) _sym_db.RegisterMessage(ListToolRequest) ListToolResponse = _reflection.GeneratedProtocolMessageType('ListToolResponse', (_message.Message,), { 'DESCRIPTOR'", "-*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source:", "utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! 
#", "= _reflection.GeneratedProtocolMessageType('ListToolResponse', (_message.Message,), { 'DESCRIPTOR' : _LISTTOOLRESPONSE, '__module__' : 'list_tool_pb2' # @@protoc_insertion_point(class_scope:basic.ListToolResponse) })", "ListToolRequest = _reflection.GeneratedProtocolMessageType('ListToolRequest', (_message.Message,), { 'DESCRIPTOR' : _LISTTOOLREQUEST, '__module__' : 'list_tool_pb2' # @@protoc_insertion_point(class_scope:basic.ListToolRequest)", "syntax='proto3', serialized_options=None, serialized_pb=_b('\\n\\x0flist_tool.proto\\x12\\x05\\x62\\x61sic\\x1a\\x1etool_sdk/model/tool/tool.proto\\\"\\x95\\x01\\n\\x0fListToolRequest\\x12\\x0e\\n\\x06\\x64\\x65tail\\x18\\x01 \\x01(\\x08\\x12\\x0e\\n\\x06plugin\\x18\\x02 \\x01(\\x08\\x12\\x10\\n\\x08\\x63\\x61tegory\\x18\\x03 \\x01(\\t\\x12\\x13\\n\\x0bpermissions\\x18\\x04 \\x01(\\t\\x12\\x16\\n\\x0eonlyProduction\\x18\\x05 \\x01(\\x08\\x12\\x15\\n\\rshowInvisible\\x18\\x06 \\x01(\\x08\\x12\\x0c\\n\\x04tags\\x18\\x07 \\x01(\\t\\\"\\\\\\n\\x10ListToolResponse\\x12\\x0c\\n\\x04page\\x18\\x01 \\x01(\\x05\\x12\\x11\\n\\tpage_size\\x18\\x02 \\x01(\\x05\\x12\\r\\n\\x05total\\x18\\x03 \\x01(\\x05\\x12\\x18\\n\\x04list\\x18\\x04", "as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default()", "default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='total', full_name='basic.ListToolResponse.total', index=2, number=3,", "name='ListToolResponse', full_name='basic.ListToolResponse', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='page', full_name='basic.ListToolResponse.page', index=0, number=1, type=5, cpp_type=1,", "= tool__sdk_dot_model_dot_tool_dot_tool__pb2._TOOL _LISTTOOLRESPONSEWRAPPER.fields_by_name['data'].message_type = _LISTTOOLRESPONSE DESCRIPTOR.message_types_by_name['ListToolRequest'] = _LISTTOOLREQUEST DESCRIPTOR.message_types_by_name['ListToolResponse'] = _LISTTOOLRESPONSE DESCRIPTOR.message_types_by_name['ListToolResponseWrapper'] =", "containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='tags', full_name='basic.ListToolRequest.tags', index=6, number=7, type=9, cpp_type=9, label=1,", "message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database", "serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='codeExplain', full_name='basic.ListToolResponseWrapper.codeExplain', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b(\"\").decode('utf-8'), message_type=None,", "has_default_value=False, default_value=_b(\"\").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[],", "as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as", "_reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() from", "full_name='basic.ListToolRequest.detail', index=0, number=1, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, 
is_extension=False, extension_scope=None,", "full_name='basic.ListToolRequest.onlyProduction', index=4, number=5, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None,", "_sym_db.RegisterMessage(ListToolRequest) ListToolResponse = _reflection.GeneratedProtocolMessageType('ListToolResponse', (_message.Message,), { 'DESCRIPTOR' : _LISTTOOLRESPONSE, '__module__' : 'list_tool_pb2' #", "syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=59, serialized_end=208, ) _LISTTOOLRESPONSE = _descriptor.Descriptor( name='ListToolResponse', full_name='basic.ListToolResponse', filename=None,", "filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='code', full_name='basic.ListToolResponseWrapper.code', index=0, number=1, type=5, cpp_type=1, label=1, has_default_value=False,", "containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='codeExplain', full_name='basic.ListToolResponseWrapper.codeExplain', index=1, number=2, type=9, cpp_type=9, label=1,", "containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='showInvisible', full_name='basic.ListToolRequest.showInvisible', index=5, number=6, type=8, cpp_type=7, label=1,", "extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='onlyProduction', full_name='basic.ListToolRequest.onlyProduction', index=4, number=5, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False,", "serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='onlyProduction', full_name='basic.ListToolRequest.onlyProduction', index=4, number=5, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None,", "serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[", "is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='list', full_name='basic.ListToolResponse.list', index=3, number=4, type=11, cpp_type=10, label=3, has_default_value=False,", "enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='data', full_name='basic.ListToolResponseWrapper.data', index=3, number=4, type=11, cpp_type=10,", "number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b(\"\").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR),", "name='code', full_name='basic.ListToolResponseWrapper.code', index=0, number=1, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False,", "_descriptor.FieldDescriptor( name='error', full_name='basic.ListToolResponseWrapper.error', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b(\"\").decode('utf-8'), message_type=None, enum_type=None, containing_type=None,", "{ 'DESCRIPTOR' : _LISTTOOLRESPONSEWRAPPER, '__module__' : 'list_tool_pb2' # 
@@protoc_insertion_point(class_scope:basic.ListToolResponseWrapper) }) _sym_db.RegisterMessage(ListToolResponseWrapper) # @@protoc_insertion_point(module_scope)", "enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='list', full_name='basic.ListToolResponse.list', index=3, number=4, type=11, cpp_type=10,", "import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import", "type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor(", "has_default_value=False, default_value=_b(\"\").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='permissions', full_name='basic.ListToolRequest.permissions', index=3,", "message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='tags', full_name='basic.ListToolRequest.tags', index=6, number=7, type=9,", "from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database #", "_descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection", ") _LISTTOOLRESPONSE.fields_by_name['list'].message_type = tool__sdk_dot_model_dot_tool_dot_tool__pb2._TOOL _LISTTOOLRESPONSEWRAPPER.fields_by_name['data'].message_type = _LISTTOOLRESPONSE DESCRIPTOR.message_types_by_name['ListToolRequest'] = _LISTTOOLREQUEST DESCRIPTOR.message_types_by_name['ListToolResponse'] = _LISTTOOLRESPONSE", "DESCRIPTOR.message_types_by_name['ListToolRequest'] = _LISTTOOLREQUEST DESCRIPTOR.message_types_by_name['ListToolResponse'] = _LISTTOOLRESPONSE DESCRIPTOR.message_types_by_name['ListToolResponseWrapper'] = _LISTTOOLRESPONSEWRAPPER _sym_db.RegisterFileDescriptor(DESCRIPTOR) ListToolRequest = _reflection.GeneratedProtocolMessageType('ListToolRequest',", "label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), ], extensions=[ ],", "is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='tags', full_name='basic.ListToolRequest.tags', index=6, number=7, type=9, cpp_type=9, label=1, has_default_value=False,", "enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='tags', full_name='basic.ListToolRequest.tags', index=6, number=7, type=9, cpp_type=9,", "source: list_tool.proto import sys _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) from google.protobuf", "# @@protoc_insertion_point(class_scope:basic.ListToolRequest) }) _sym_db.RegisterMessage(ListToolRequest) ListToolResponse = _reflection.GeneratedProtocolMessageType('ListToolResponse', (_message.Message,), { 'DESCRIPTOR' : _LISTTOOLRESPONSE, '__module__'", "_LISTTOOLRESPONSE, '__module__' : 'list_tool_pb2' # @@protoc_insertion_point(class_scope:basic.ListToolResponse) }) _sym_db.RegisterMessage(ListToolResponse) ListToolResponseWrapper = 
_reflection.GeneratedProtocolMessageType('ListToolResponseWrapper', (_message.Message,), {", "cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), ], extensions=[", "protocol buffer compiler. DO NOT EDIT! # source: list_tool.proto import sys _b=sys.version_info[0]<3 and", "enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=59, serialized_end=208, ) _LISTTOOLRESPONSE =", "_symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() from tool_sdk.model.tool import tool_pb2 as tool__sdk_dot_model_dot_tool_dot_tool__pb2 DESCRIPTOR", "file=DESCRIPTOR), _descriptor.FieldDescriptor( name='data', full_name='basic.ListToolResponseWrapper.data', index=3, number=4, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None,", "import message as _message from google.protobuf import reflection as _reflection from google.protobuf import", "_descriptor.FieldDescriptor( name='showInvisible', full_name='basic.ListToolRequest.showInvisible', index=5, number=6, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None,", "serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='category', full_name='basic.ListToolRequest.category', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b(\"\").decode('utf-8'), message_type=None,", "label=1, has_default_value=False, default_value=_b(\"\").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='permissions', full_name='basic.ListToolRequest.permissions',", "NOT EDIT! 
# source: list_tool.proto import sys _b=sys.version_info[0]<3 and (lambda x:x) or (lambda", "\\x01(\\x08\\x12\\x15\\n\\rshowInvisible\\x18\\x06 \\x01(\\x08\\x12\\x0c\\n\\x04tags\\x18\\x07 \\x01(\\t\\\"\\\\\\n\\x10ListToolResponse\\x12\\x0c\\n\\x04page\\x18\\x01 \\x01(\\x05\\x12\\x11\\n\\tpage_size\\x18\\x02 \\x01(\\x05\\x12\\r\\n\\x05total\\x18\\x03 \\x01(\\x05\\x12\\x18\\n\\x04list\\x18\\x04 \\x03(\\x0b\\x32\\n.tool.Tool\\\"r\\n\\x17ListToolResponseWrapper\\x12\\x0c\\n\\x04\\x63ode\\x18\\x01 \\x01(\\x05\\x12\\x13\\n\\x0b\\x63odeExplain\\x18\\x02 \\x01(\\t\\x12\\r\\n\\x05\\x65rror\\x18\\x03 \\x01(\\t\\x12%\\n\\x04\\x64\\x61ta\\x18\\x04 \\x01(\\x0b\\x32\\x17.basic.ListToolResponseb\\x06proto3') , dependencies=[tool__sdk_dot_model_dot_tool_dot_tool__pb2.DESCRIPTOR,])", "package='basic', syntax='proto3', serialized_options=None, serialized_pb=_b('\\n\\x0flist_tool.proto\\x12\\x05\\x62\\x61sic\\x1a\\x1etool_sdk/model/tool/tool.proto\\\"\\x95\\x01\\n\\x0fListToolRequest\\x12\\x0e\\n\\x06\\x64\\x65tail\\x18\\x01 \\x01(\\x08\\x12\\x0e\\n\\x06plugin\\x18\\x02 \\x01(\\x08\\x12\\x10\\n\\x08\\x63\\x61tegory\\x18\\x03 \\x01(\\t\\x12\\x13\\n\\x0bpermissions\\x18\\x04 \\x01(\\t\\x12\\x16\\n\\x0eonlyProduction\\x18\\x05 \\x01(\\x08\\x12\\x15\\n\\rshowInvisible\\x18\\x06 \\x01(\\x08\\x12\\x0c\\n\\x04tags\\x18\\x07 \\x01(\\t\\\"\\\\\\n\\x10ListToolResponse\\x12\\x0c\\n\\x04page\\x18\\x01 \\x01(\\x05\\x12\\x11\\n\\tpage_size\\x18\\x02 \\x01(\\x05\\x12\\r\\n\\x05total\\x18\\x03", "nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=59, serialized_end=208, ) _LISTTOOLRESPONSE", "containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='plugin', full_name='basic.ListToolRequest.plugin', index=1, number=2, type=8, cpp_type=7, label=1," ]
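Since only generated descriptor code is recoverable from this row, a minimal usage sketch may be more useful than the boilerplate itself. It assumes the module is importable as list_tool_pb2 (the import path is an assumption) and relies only on the standard protobuf Python message API.

# Minimal sketch, assuming the generated module is importable as list_tool_pb2.
import list_tool_pb2

req = list_tool_pb2.ListToolRequest(
    detail=True,           # bool field 1
    category="analysis",   # string field 3
    onlyProduction=False,  # bool field 5
    tags="beta",           # string field 7
)
payload = req.SerializeToString()    # wire-format bytes

decoded = list_tool_pb2.ListToolRequest()
decoded.ParseFromString(payload)     # round-trips the same field values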
(This row's fragments piece together into a small HRDF stops-report command-line script.)
import argparse
import os
import re
import sys

from inc.HRDF.Stops_Reporter.stops_reporter import HRDF_Stops_Reporter
from inc.HRDF.HRDF_Parser.hrdf_helpers import compute_formatted_date_from_hrdf_db_path
from inc.HRDF.db_helpers import compute_db_tables_report

parser = argparse.ArgumentParser(description='Generate stops report from HRDF DB')
parser.add_argument('-p', '--path', help='Path to HRDF DB')
args = parser.parse_args()

db_path = args.path
if db_path is None:
    print("ERROR, use with --path")
    sys.exit(1)

compute_db_tables_report(db_path=db_path)
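A quick invocation note: the script exits with its own error message when --path is omitted. The file name below is an assumption (the row does not preserve it), and the stricter variant shown is only a sketch of an alternative argparse setup, not part of the original script.

# Hypothetical invocation (script name is an assumption):
#   python generate_stops_report.py --path /path/to/hrdf.sqlite
# Alternative sketch: let argparse enforce the argument instead of checking for None.
import argparse

parser = argparse.ArgumentParser(description='Generate stops report from HRDF DB')
parser.add_argument('-p', '--path', required=True, help='Path to HRDF DB')
args = parser.parse_args()  # argparse prints usage and exits if --path is missing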
(This row's fragments piece together into a PyQt5-generated UI module; the visible part ends inside retranslateUi.)
# -*- coding: utf-8 -*-

# Form implementation generated from reading ui file 'mainwindow.ui'
#
# Created by: PyQt5 UI code generator 5.13.0
#
# WARNING! All changes made in this file will be lost!

from PyQt5 import QtCore, QtGui, QtWidgets


class Ui_MainWindow(object):
    def setupUi(self, MainWindow):
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(800, 600)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.gridLayout_2 = QtWidgets.QGridLayout(self.centralwidget)
        self.gridLayout_2.setObjectName("gridLayout_2")
        self.gridLayout = QtWidgets.QGridLayout()
        self.gridLayout.setObjectName("gridLayout")
        self.buttonLayout = QtWidgets.QGridLayout()
        self.gridLayout.addLayout(self.buttonLayout, 0, 0)
        self.openfileButton = QtWidgets.QPushButton(self.centralwidget)
        self.openfileButton.setObjectName("openfileButton")
        self.buttonLayout.addWidget(self.openfileButton, 0, 0, 1, 1)
        self.saveButton = QtWidgets.QPushButton(self.centralwidget)
        self.saveButton.setObjectName('saveButton')
        self.buttonLayout.addWidget(self.saveButton, 1, 0, 1, 1)
        self.nextButton = QtWidgets.QPushButton(self.centralwidget)
        self.nextButton.setObjectName('nextButton')
        self.buttonLayout.addWidget(self.nextButton, 2, 0, 1, 1)
        self.clearButton = QtWidgets.QPushButton(self.centralwidget)
        self.clearButton.setObjectName('clearButton')
        self.buttonLayout.addWidget(self.clearButton, 4, 0, 1, 1)
        self.lastButton = QtWidgets.QPushButton(self.centralwidget)
        self.lastButton.setObjectName('lastButton')
        self.buttonLayout.addWidget(self.lastButton, 3, 0, 1, 1)
        self.gridLayout_2.addLayout(self.gridLayout, 0, 0, 1, 1)
        MainWindow.setCentralWidget(self.centralwidget)
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 18))
        self.menubar.setObjectName("menubar")
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        self.label_pos = QtWidgets.QLabel()
        self.buttonLayout.addWidget(self.label_pos, 0, 1)
        self.label_state = QtWidgets.QLabel()
        self.buttonLayout.addWidget(self.label_state, 1, 1)
        self.label_file_name = QtWidgets.QLabel()
        self.buttonLayout.addWidget(self.label_file_name, 2, 1)
        self.label_annotation_points = QtWidgets.QLabel()
        self.buttonLayout.addWidget(self.label_annotation_points, 4, 1)

        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
        self.openfileButton.setText(_translate("MainWindow", "open"))
        self.saveButton.setText('save')
        self.nextButton.setText('next')
        self.clearButton.setText('clear')
All changes made in this file will be lost! from PyQt5 import", "# # Created by: PyQt5 UI code generator 5.13.0 # # WARNING! All", "self.statusbar.setObjectName(\"statusbar\") MainWindow.setStatusBar(self.statusbar) self.label_pos = QtWidgets.QLabel() self.buttonLayout.addWidget(self.label_pos, 0, 1) self.label_state = QtWidgets.QLabel() self.buttonLayout.addWidget(self.label_state, 1,", "QtWidgets.QPushButton(self.centralwidget) self.openfileButton.setObjectName(\"openfileButton\") self.buttonLayout.addWidget(self.openfileButton, 0, 0, 1, 1) self.saveButton = QtWidgets.QPushButton(self.centralwidget) self.saveButton.setObjectName('saveButton') self.buttonLayout.addWidget(self.saveButton, 1,", "in this file will be lost! from PyQt5 import QtCore, QtGui, QtWidgets class", "0, 0, 1, 1) self.saveButton = QtWidgets.QPushButton(self.centralwidget) self.saveButton.setObjectName('saveButton') self.buttonLayout.addWidget(self.saveButton, 1, 0, 1, 1)", "self.statusbar = QtWidgets.QStatusBar(MainWindow) self.statusbar.setObjectName(\"statusbar\") MainWindow.setStatusBar(self.statusbar) self.label_pos = QtWidgets.QLabel() self.buttonLayout.addWidget(self.label_pos, 0, 1) self.label_state =", "QtWidgets.QGridLayout() self.gridLayout.setObjectName(\"gridLayout\") self.buttonLayout = QtWidgets.QGridLayout() self.gridLayout.addLayout(self.buttonLayout, 0, 0) self.openfileButton = QtWidgets.QPushButton(self.centralwidget) self.openfileButton.setObjectName(\"openfileButton\") self.buttonLayout.addWidget(self.openfileButton,", "# Form implementation generated from reading ui file 'mainwindow.ui' # # Created by:", "will be lost! from PyQt5 import QtCore, QtGui, QtWidgets class Ui_MainWindow(object): def setupUi(self,", "self.centralwidget = QtWidgets.QWidget(MainWindow) self.centralwidget.setObjectName(\"centralwidget\") self.gridLayout_2 = QtWidgets.QGridLayout(self.centralwidget) self.gridLayout_2.setObjectName(\"gridLayout_2\") self.gridLayout = QtWidgets.QGridLayout() self.gridLayout.setObjectName(\"gridLayout\") self.buttonLayout", "= QtWidgets.QPushButton(self.centralwidget) self.nextButton.setObjectName('nextButton') self.buttonLayout.addWidget(self.nextButton, 2, 0, 1, 1) self.clearButton = QtWidgets.QPushButton(self.centralwidget) self.clearButton.setObjectName('clearButton') self.buttonLayout.addWidget(self.clearButton,", "self.gridLayout_2.setObjectName(\"gridLayout_2\") self.gridLayout = QtWidgets.QGridLayout() self.gridLayout.setObjectName(\"gridLayout\") self.buttonLayout = QtWidgets.QGridLayout() self.gridLayout.addLayout(self.buttonLayout, 0, 0) self.openfileButton =", "self.gridLayout.setObjectName(\"gridLayout\") self.buttonLayout = QtWidgets.QGridLayout() self.gridLayout.addLayout(self.buttonLayout, 0, 0) self.openfileButton = QtWidgets.QPushButton(self.centralwidget) self.openfileButton.setObjectName(\"openfileButton\") self.buttonLayout.addWidget(self.openfileButton, 0,", "1, 1) self.saveButton = QtWidgets.QPushButton(self.centralwidget) self.saveButton.setObjectName('saveButton') self.buttonLayout.addWidget(self.saveButton, 1, 0, 1, 1) self.nextButton =", "# -*- coding: utf-8 -*- # Form implementation generated from reading ui file", "QtWidgets.QPushButton(self.centralwidget) self.clearButton.setObjectName('clearButton') self.buttonLayout.addWidget(self.clearButton, 4, 0, 1, 1) self.lastButton = QtWidgets.QPushButton(self.centralwidget) self.lastButton.setObjectName('lastButton') self.buttonLayout.addWidget(self.lastButton, 3,", "this file will be lost! 
from PyQt5 import QtCore, QtGui, QtWidgets class Ui_MainWindow(object):", "-*- # Form implementation generated from reading ui file 'mainwindow.ui' # # Created", "self.clearButton.setObjectName('clearButton') self.buttonLayout.addWidget(self.clearButton, 4, 0, 1, 1) self.lastButton = QtWidgets.QPushButton(self.centralwidget) self.lastButton.setObjectName('lastButton') self.buttonLayout.addWidget(self.lastButton, 3, 0,", "coding: utf-8 -*- # Form implementation generated from reading ui file 'mainwindow.ui' #", "self.buttonLayout = QtWidgets.QGridLayout() self.gridLayout.addLayout(self.buttonLayout, 0, 0) self.openfileButton = QtWidgets.QPushButton(self.centralwidget) self.openfileButton.setObjectName(\"openfileButton\") self.buttonLayout.addWidget(self.openfileButton, 0, 0,", "self.buttonLayout.addWidget(self.nextButton, 2, 0, 1, 1) self.clearButton = QtWidgets.QPushButton(self.centralwidget) self.clearButton.setObjectName('clearButton') self.buttonLayout.addWidget(self.clearButton, 4, 0, 1,", "1, 1) self.lastButton = QtWidgets.QPushButton(self.centralwidget) self.lastButton.setObjectName('lastButton') self.buttonLayout.addWidget(self.lastButton, 3, 0, 1, 1) self.gridLayout_2.addLayout(self.gridLayout, 0,", "code generator 5.13.0 # # WARNING! All changes made in this file will", "# # WARNING! All changes made in this file will be lost! from", "0, 800, 18)) self.menubar.setObjectName(\"menubar\") MainWindow.setMenuBar(self.menubar) self.statusbar = QtWidgets.QStatusBar(MainWindow) self.statusbar.setObjectName(\"statusbar\") MainWindow.setStatusBar(self.statusbar) self.label_pos = QtWidgets.QLabel()", "= QtWidgets.QPushButton(self.centralwidget) self.openfileButton.setObjectName(\"openfileButton\") self.buttonLayout.addWidget(self.openfileButton, 0, 0, 1, 1) self.saveButton = QtWidgets.QPushButton(self.centralwidget) self.saveButton.setObjectName('saveButton') self.buttonLayout.addWidget(self.saveButton,", "4, 0, 1, 1) self.lastButton = QtWidgets.QPushButton(self.centralwidget) self.lastButton.setObjectName('lastButton') self.buttonLayout.addWidget(self.lastButton, 3, 0, 1, 1)", "self.label_state = QtWidgets.QLabel() self.buttonLayout.addWidget(self.label_state, 1, 1) self.label_file_name = QtWidgets.QLabel() self.buttonLayout.addWidget(self.label_file_name, 2, 1) self.label_annotation_points", "reading ui file 'mainwindow.ui' # # Created by: PyQt5 UI code generator 5.13.0", "self.buttonLayout.addWidget(self.label_pos, 0, 1) self.label_state = QtWidgets.QLabel() self.buttonLayout.addWidget(self.label_state, 1, 1) self.label_file_name = QtWidgets.QLabel() self.buttonLayout.addWidget(self.label_file_name,", "= QtWidgets.QPushButton(self.centralwidget) self.clearButton.setObjectName('clearButton') self.buttonLayout.addWidget(self.clearButton, 4, 0, 1, 1) self.lastButton = QtWidgets.QPushButton(self.centralwidget) self.lastButton.setObjectName('lastButton') self.buttonLayout.addWidget(self.lastButton,", "1) MainWindow.setCentralWidget(self.centralwidget) self.menubar = QtWidgets.QMenuBar(MainWindow) self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 18)) self.menubar.setObjectName(\"menubar\") MainWindow.setMenuBar(self.menubar) self.statusbar =", "2, 0, 1, 1) self.clearButton = QtWidgets.QPushButton(self.centralwidget) self.clearButton.setObjectName('clearButton') self.buttonLayout.addWidget(self.clearButton, 4, 0, 1, 1)", "self.gridLayout.addLayout(self.buttonLayout, 0, 0) self.openfileButton = QtWidgets.QPushButton(self.centralwidget) self.openfileButton.setObjectName(\"openfileButton\") 
self.buttonLayout.addWidget(self.openfileButton, 0, 0, 1, 1) self.saveButton", "ui file 'mainwindow.ui' # # Created by: PyQt5 UI code generator 5.13.0 #", "0, 0, 1, 1) MainWindow.setCentralWidget(self.centralwidget) self.menubar = QtWidgets.QMenuBar(MainWindow) self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 18)) self.menubar.setObjectName(\"menubar\")", "lost! from PyQt5 import QtCore, QtGui, QtWidgets class Ui_MainWindow(object): def setupUi(self, MainWindow): MainWindow.setObjectName(\"MainWindow\")", "0, 1, 1) self.gridLayout_2.addLayout(self.gridLayout, 0, 0, 1, 1) MainWindow.setCentralWidget(self.centralwidget) self.menubar = QtWidgets.QMenuBar(MainWindow) self.menubar.setGeometry(QtCore.QRect(0,", "= QtWidgets.QLabel() self.buttonLayout.addWidget(self.label_pos, 0, 1) self.label_state = QtWidgets.QLabel() self.buttonLayout.addWidget(self.label_state, 1, 1) self.label_file_name =", "QtWidgets.QMenuBar(MainWindow) self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 18)) self.menubar.setObjectName(\"menubar\") MainWindow.setMenuBar(self.menubar) self.statusbar = QtWidgets.QStatusBar(MainWindow) self.statusbar.setObjectName(\"statusbar\") MainWindow.setStatusBar(self.statusbar) self.label_pos", "generator 5.13.0 # # WARNING! All changes made in this file will be", "= QtWidgets.QPushButton(self.centralwidget) self.lastButton.setObjectName('lastButton') self.buttonLayout.addWidget(self.lastButton, 3, 0, 1, 1) self.gridLayout_2.addLayout(self.gridLayout, 0, 0, 1, 1)", "0, 1, 1) self.nextButton = QtWidgets.QPushButton(self.centralwidget) self.nextButton.setObjectName('nextButton') self.buttonLayout.addWidget(self.nextButton, 2, 0, 1, 1) self.clearButton", "1) self.saveButton = QtWidgets.QPushButton(self.centralwidget) self.saveButton.setObjectName('saveButton') self.buttonLayout.addWidget(self.saveButton, 1, 0, 1, 1) self.nextButton = QtWidgets.QPushButton(self.centralwidget)", "0, 1, 1) self.clearButton = QtWidgets.QPushButton(self.centralwidget) self.clearButton.setObjectName('clearButton') self.buttonLayout.addWidget(self.clearButton, 4, 0, 1, 1) self.lastButton", "self.menubar = QtWidgets.QMenuBar(MainWindow) self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 18)) self.menubar.setObjectName(\"menubar\") MainWindow.setMenuBar(self.menubar) self.statusbar = QtWidgets.QStatusBar(MainWindow) self.statusbar.setObjectName(\"statusbar\")", "self.buttonLayout.addWidget(self.saveButton, 1, 0, 1, 1) self.nextButton = QtWidgets.QPushButton(self.centralwidget) self.nextButton.setObjectName('nextButton') self.buttonLayout.addWidget(self.nextButton, 2, 0, 1,", "'mainwindow.ui' # # Created by: PyQt5 UI code generator 5.13.0 # # WARNING!", "0, 0) self.openfileButton = QtWidgets.QPushButton(self.centralwidget) self.openfileButton.setObjectName(\"openfileButton\") self.buttonLayout.addWidget(self.openfileButton, 0, 0, 1, 1) self.saveButton =", "self.buttonLayout.addWidget(self.label_file_name, 2, 1) self.label_annotation_points = QtWidgets.QLabel() self.buttonLayout.addWidget(self.label_annotation_points, 4, 1) self.retranslateUi(MainWindow) QtCore.QMetaObject.connectSlotsByName(MainWindow) def retranslateUi(self,", "retranslateUi(self, MainWindow): _translate = QtCore.QCoreApplication.translate MainWindow.setWindowTitle(_translate(\"MainWindow\", \"MainWindow\")) self.openfileButton.setText(_translate(\"MainWindow\", \"open\")) self.saveButton.setText('save') self.nextButton.setText('next') self.clearButton.setText('clear') self.lastButton.setText('previous')", "1, 1) self.label_file_name = 
QtWidgets.QLabel() self.buttonLayout.addWidget(self.label_file_name, 2, 1) self.label_annotation_points = QtWidgets.QLabel() self.buttonLayout.addWidget(self.label_annotation_points, 4,", "1) self.clearButton = QtWidgets.QPushButton(self.centralwidget) self.clearButton.setObjectName('clearButton') self.buttonLayout.addWidget(self.clearButton, 4, 0, 1, 1) self.lastButton = QtWidgets.QPushButton(self.centralwidget)", "-*- coding: utf-8 -*- # Form implementation generated from reading ui file 'mainwindow.ui'", "0, 1, 1) self.saveButton = QtWidgets.QPushButton(self.centralwidget) self.saveButton.setObjectName('saveButton') self.buttonLayout.addWidget(self.saveButton, 1, 0, 1, 1) self.nextButton", "self.gridLayout_2.addLayout(self.gridLayout, 0, 0, 1, 1) MainWindow.setCentralWidget(self.centralwidget) self.menubar = QtWidgets.QMenuBar(MainWindow) self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 18))", "600) self.centralwidget = QtWidgets.QWidget(MainWindow) self.centralwidget.setObjectName(\"centralwidget\") self.gridLayout_2 = QtWidgets.QGridLayout(self.centralwidget) self.gridLayout_2.setObjectName(\"gridLayout_2\") self.gridLayout = QtWidgets.QGridLayout() self.gridLayout.setObjectName(\"gridLayout\")", "import QtCore, QtGui, QtWidgets class Ui_MainWindow(object): def setupUi(self, MainWindow): MainWindow.setObjectName(\"MainWindow\") MainWindow.resize(800, 600) self.centralwidget", "self.lastButton = QtWidgets.QPushButton(self.centralwidget) self.lastButton.setObjectName('lastButton') self.buttonLayout.addWidget(self.lastButton, 3, 0, 1, 1) self.gridLayout_2.addLayout(self.gridLayout, 0, 0, 1,", "= QtWidgets.QPushButton(self.centralwidget) self.saveButton.setObjectName('saveButton') self.buttonLayout.addWidget(self.saveButton, 1, 0, 1, 1) self.nextButton = QtWidgets.QPushButton(self.centralwidget) self.nextButton.setObjectName('nextButton') self.buttonLayout.addWidget(self.nextButton,", "UI code generator 5.13.0 # # WARNING! All changes made in this file", "1) self.label_annotation_points = QtWidgets.QLabel() self.buttonLayout.addWidget(self.label_annotation_points, 4, 1) self.retranslateUi(MainWindow) QtCore.QMetaObject.connectSlotsByName(MainWindow) def retranslateUi(self, MainWindow): _translate", "2, 1) self.label_annotation_points = QtWidgets.QLabel() self.buttonLayout.addWidget(self.label_annotation_points, 4, 1) self.retranslateUi(MainWindow) QtCore.QMetaObject.connectSlotsByName(MainWindow) def retranslateUi(self, MainWindow):", "by: PyQt5 UI code generator 5.13.0 # # WARNING! 
All changes made in", "QtWidgets.QLabel() self.buttonLayout.addWidget(self.label_file_name, 2, 1) self.label_annotation_points = QtWidgets.QLabel() self.buttonLayout.addWidget(self.label_annotation_points, 4, 1) self.retranslateUi(MainWindow) QtCore.QMetaObject.connectSlotsByName(MainWindow) def", "self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 18)) self.menubar.setObjectName(\"menubar\") MainWindow.setMenuBar(self.menubar) self.statusbar = QtWidgets.QStatusBar(MainWindow) self.statusbar.setObjectName(\"statusbar\") MainWindow.setStatusBar(self.statusbar) self.label_pos =", "0) self.openfileButton = QtWidgets.QPushButton(self.centralwidget) self.openfileButton.setObjectName(\"openfileButton\") self.buttonLayout.addWidget(self.openfileButton, 0, 0, 1, 1) self.saveButton = QtWidgets.QPushButton(self.centralwidget)", "4, 1) self.retranslateUi(MainWindow) QtCore.QMetaObject.connectSlotsByName(MainWindow) def retranslateUi(self, MainWindow): _translate = QtCore.QCoreApplication.translate MainWindow.setWindowTitle(_translate(\"MainWindow\", \"MainWindow\")) self.openfileButton.setText(_translate(\"MainWindow\",", "QtCore, QtGui, QtWidgets class Ui_MainWindow(object): def setupUi(self, MainWindow): MainWindow.setObjectName(\"MainWindow\") MainWindow.resize(800, 600) self.centralwidget =", "self.openfileButton.setObjectName(\"openfileButton\") self.buttonLayout.addWidget(self.openfileButton, 0, 0, 1, 1) self.saveButton = QtWidgets.QPushButton(self.centralwidget) self.saveButton.setObjectName('saveButton') self.buttonLayout.addWidget(self.saveButton, 1, 0,", "0, 1, 1) self.lastButton = QtWidgets.QPushButton(self.centralwidget) self.lastButton.setObjectName('lastButton') self.buttonLayout.addWidget(self.lastButton, 3, 0, 1, 1) self.gridLayout_2.addLayout(self.gridLayout,", "5.13.0 # # WARNING! All changes made in this file will be lost!", "self.menubar.setObjectName(\"menubar\") MainWindow.setMenuBar(self.menubar) self.statusbar = QtWidgets.QStatusBar(MainWindow) self.statusbar.setObjectName(\"statusbar\") MainWindow.setStatusBar(self.statusbar) self.label_pos = QtWidgets.QLabel() self.buttonLayout.addWidget(self.label_pos, 0, 1)", "implementation generated from reading ui file 'mainwindow.ui' # # Created by: PyQt5 UI", "self.openfileButton = QtWidgets.QPushButton(self.centralwidget) self.openfileButton.setObjectName(\"openfileButton\") self.buttonLayout.addWidget(self.openfileButton, 0, 0, 1, 1) self.saveButton = QtWidgets.QPushButton(self.centralwidget) self.saveButton.setObjectName('saveButton')", "QtWidgets.QPushButton(self.centralwidget) self.lastButton.setObjectName('lastButton') self.buttonLayout.addWidget(self.lastButton, 3, 0, 1, 1) self.gridLayout_2.addLayout(self.gridLayout, 0, 0, 1, 1) MainWindow.setCentralWidget(self.centralwidget)", "made in this file will be lost! 
from PyQt5 import QtCore, QtGui, QtWidgets", "1) self.retranslateUi(MainWindow) QtCore.QMetaObject.connectSlotsByName(MainWindow) def retranslateUi(self, MainWindow): _translate = QtCore.QCoreApplication.translate MainWindow.setWindowTitle(_translate(\"MainWindow\", \"MainWindow\")) self.openfileButton.setText(_translate(\"MainWindow\", \"open\"))", "self.nextButton = QtWidgets.QPushButton(self.centralwidget) self.nextButton.setObjectName('nextButton') self.buttonLayout.addWidget(self.nextButton, 2, 0, 1, 1) self.clearButton = QtWidgets.QPushButton(self.centralwidget) self.clearButton.setObjectName('clearButton')", "QtWidgets.QPushButton(self.centralwidget) self.saveButton.setObjectName('saveButton') self.buttonLayout.addWidget(self.saveButton, 1, 0, 1, 1) self.nextButton = QtWidgets.QPushButton(self.centralwidget) self.nextButton.setObjectName('nextButton') self.buttonLayout.addWidget(self.nextButton, 2,", "self.saveButton = QtWidgets.QPushButton(self.centralwidget) self.saveButton.setObjectName('saveButton') self.buttonLayout.addWidget(self.saveButton, 1, 0, 1, 1) self.nextButton = QtWidgets.QPushButton(self.centralwidget) self.nextButton.setObjectName('nextButton')", "self.label_annotation_points = QtWidgets.QLabel() self.buttonLayout.addWidget(self.label_annotation_points, 4, 1) self.retranslateUi(MainWindow) QtCore.QMetaObject.connectSlotsByName(MainWindow) def retranslateUi(self, MainWindow): _translate =", "1, 1) MainWindow.setCentralWidget(self.centralwidget) self.menubar = QtWidgets.QMenuBar(MainWindow) self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 18)) self.menubar.setObjectName(\"menubar\") MainWindow.setMenuBar(self.menubar) self.statusbar", "PyQt5 UI code generator 5.13.0 # # WARNING! All changes made in this", "0, 1) self.label_state = QtWidgets.QLabel() self.buttonLayout.addWidget(self.label_state, 1, 1) self.label_file_name = QtWidgets.QLabel() self.buttonLayout.addWidget(self.label_file_name, 2,", "1) self.nextButton = QtWidgets.QPushButton(self.centralwidget) self.nextButton.setObjectName('nextButton') self.buttonLayout.addWidget(self.nextButton, 2, 0, 1, 1) self.clearButton = QtWidgets.QPushButton(self.centralwidget)" ]
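Editor's note: the row above shreds a pyuic5-generated module into overlapping n-grams: a Ui_MainWindow class whose setupUi() lays out open/save/next/clear/previous buttons plus labels for position, state, file name and annotation points on a QMainWindow. As a hedged illustration of how such a generated class is normally consumed (not part of the dataset itself), a minimal wrapper could look like the sketch below; the module name mainwindow_ui and the AnnotationWindow class are assumptions.

# Hypothetical usage sketch. Assumes PyQt5 and a module produced by
#   pyuic5 mainwindow.ui -o mainwindow_ui.py
import sys

from PyQt5 import QtWidgets

from mainwindow_ui import Ui_MainWindow  # assumed module name for the generated class


class AnnotationWindow(QtWidgets.QMainWindow):
    """Thin wrapper that owns the generated UI and wires up one button."""

    def __init__(self):
        super().__init__()
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)  # builds the buttons, labels, menubar and statusbar onto self
        self.ui.openfileButton.clicked.connect(self.open_file)

    def open_file(self):
        # Show the chosen path in the label the generated code names label_file_name.
        path, _ = QtWidgets.QFileDialog.getOpenFileName(self, "open")
        if path:
            self.ui.label_file_name.setText(path)


if __name__ == "__main__":
    app = QtWidgets.QApplication(sys.argv)
    window = AnnotationWindow()
    window.show()
    sys.exit(app.exec_())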
[ "= ('title', 'status') extra = 1 model = Post @admin.register(Category, site=custom_site) class CategoryAdmin(BaseOwnerAdmin):", "show_full_result_count = False # 优化显示结果 list_filter = ['title'] actions_on_top = True date_hierarchy =", "def operator(self, obj): return format_html( '<a href=\"{}\">编辑</a>', reverse('cus_site:blog_post_change', args=(obj.id,)) ) operator.show_description = '操作'", "operator(self, obj): return format_html( '<a href=\"{}\">编辑</a>', reverse('cus_site:blog_post_change', args=(obj.id,)) ) operator.show_description = '操作' operator.empty_value_display", "= ['title', ] # 编辑页面 fieldsets = ( # 跟fields互斥 ('基础配置', { 'fields':", "= ('name', 'status', 'is_nav',) @admin.register(Tag, site=custom_site) class TagAdmin(BaseOwnerAdmin): list_display = ['name', 'status', 'owner',", "# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.contrib import admin", "operator.show_description = '操作' operator.empty_value_display = '???' class PostInlineAdmin(admin.TabularInline): fields = ('title', 'status') extra", "'addon'), 'fields': ('tag',), }), ) # 布局作用 filter_horizontal = ('tag',) def operator(self, obj):", "'status', 'content') }), ('高级配置', { 'classes': ('collapse', 'addon'), 'fields': ('tag',), }), ) #", "-*- from __future__ import unicode_literals from django.contrib import admin from .models import Post,", "= False # 优化显示结果 list_filter = ['title'] actions_on_top = True date_hierarchy = 'created_time'", "import reverse from .adminforms import PostAdminForm from typeidea.custom_admin import BaseOwnerAdmin # Register your", "from typeidea.custom_admin import BaseOwnerAdmin # Register your models here. @admin.register(Post, site=custom_site) class PostAdmin(BaseOwnerAdmin):", "[PostInlineAdmin,] fields = ('name', 'status', 'is_nav',) @admin.register(Tag, site=custom_site) class TagAdmin(BaseOwnerAdmin): list_display = ['name',", "inlines = [PostInlineAdmin,] fields = ('name', 'status', 'is_nav',) @admin.register(Tag, site=custom_site) class TagAdmin(BaseOwnerAdmin): list_display", "Post, Category, Tag from typeidea.custom_site import custom_site from django.utils.html import format_html from django.core.urlresolvers", "'<a href=\"{}\">编辑</a>', reverse('cus_site:blog_post_change', args=(obj.id,)) ) operator.show_description = '操作' operator.empty_value_display = '???' class PostInlineAdmin(admin.TabularInline):", "__future__ import unicode_literals from django.contrib import admin from .models import Post, Category, Tag", "import PostAdminForm from typeidea.custom_admin import BaseOwnerAdmin # Register your models here. @admin.register(Post, site=custom_site)", "'fields': ('tag',), }), ) # 布局作用 filter_horizontal = ('tag',) def operator(self, obj): return", "# 跟fields互斥 ('基础配置', { 'fields': (('category', 'title'), 'desc', 'status', 'content') }), ('高级配置', {", "Register your models here. @admin.register(Post, site=custom_site) class PostAdmin(BaseOwnerAdmin): form = PostAdminForm list_display =", "import format_html from django.core.urlresolvers import reverse from .adminforms import PostAdminForm from typeidea.custom_admin import", "href=\"{}\">编辑</a>', reverse('cus_site:blog_post_change', args=(obj.id,)) ) operator.show_description = '操作' operator.empty_value_display = '???' 
class PostInlineAdmin(admin.TabularInline): fields", "Category, Tag from typeidea.custom_site import custom_site from django.utils.html import format_html from django.core.urlresolvers import", "['title', ] # 编辑页面 fieldsets = ( # 跟fields互斥 ('基础配置', { 'fields': (('category',", "date_hierarchy = 'created_time' list_editable = ['title', ] # 编辑页面 fieldsets = ( #", "class CategoryAdmin(BaseOwnerAdmin): list_display = ['name', 'status','is_nav', 'created_time'] inlines = [PostInlineAdmin,] fields = ('name',", "'???' class PostInlineAdmin(admin.TabularInline): fields = ('title', 'status') extra = 1 model = Post", "编辑页面 fieldsets = ( # 跟fields互斥 ('基础配置', { 'fields': (('category', 'title'), 'desc', 'status',", "'operator'] list_display_links = ['category', 'status'] search_fields = ['title', 'category__name', 'owner__first_name'] save_on_top = False", "= True date_hierarchy = 'created_time' list_editable = ['title', ] # 编辑页面 fieldsets =", "list_filter = ['title'] actions_on_top = True date_hierarchy = 'created_time' list_editable = ['title', ]", "(('category', 'title'), 'desc', 'status', 'content') }), ('高级配置', { 'classes': ('collapse', 'addon'), 'fields': ('tag',),", "coding: utf-8 -*- from __future__ import unicode_literals from django.contrib import admin from .models", ") # 布局作用 filter_horizontal = ('tag',) def operator(self, obj): return format_html( '<a href=\"{}\">编辑</a>',", "'created_time', 'operator'] list_display_links = ['category', 'status'] search_fields = ['title', 'category__name', 'owner__first_name'] save_on_top =", "actions_on_top = True date_hierarchy = 'created_time' list_editable = ['title', ] # 编辑页面 fieldsets", "django.contrib import admin from .models import Post, Category, Tag from typeidea.custom_site import custom_site", "'title'), 'desc', 'status', 'content') }), ('高级配置', { 'classes': ('collapse', 'addon'), 'fields': ('tag',), }),", "CategoryAdmin(BaseOwnerAdmin): list_display = ['name', 'status','is_nav', 'created_time'] inlines = [PostInlineAdmin,] fields = ('name', 'status',", "BaseOwnerAdmin # Register your models here. @admin.register(Post, site=custom_site) class PostAdmin(BaseOwnerAdmin): form = PostAdminForm", "obj): return format_html( '<a href=\"{}\">编辑</a>', reverse('cus_site:blog_post_change', args=(obj.id,)) ) operator.show_description = '操作' operator.empty_value_display =", "= ( # 跟fields互斥 ('基础配置', { 'fields': (('category', 'title'), 'desc', 'status', 'content') }),", "model = Post @admin.register(Category, site=custom_site) class CategoryAdmin(BaseOwnerAdmin): list_display = ['name', 'status','is_nav', 'created_time'] inlines", "return format_html( '<a href=\"{}\">编辑</a>', reverse('cus_site:blog_post_change', args=(obj.id,)) ) operator.show_description = '操作' operator.empty_value_display = '???'", "Tag from typeidea.custom_site import custom_site from django.utils.html import format_html from django.core.urlresolvers import reverse", "('title', 'status') extra = 1 model = Post @admin.register(Category, site=custom_site) class CategoryAdmin(BaseOwnerAdmin): list_display", "= False show_full_result_count = False # 优化显示结果 list_filter = ['title'] actions_on_top = True", "'status', 'owner', 'created_time', 'operator'] list_display_links = ['category', 'status'] search_fields = ['title', 'category__name', 'owner__first_name']", "'content') }), ('高级配置', { 'classes': ('collapse', 'addon'), 'fields': ('tag',), }), ) # 布局作用", "models here. 
@admin.register(Post, site=custom_site) class PostAdmin(BaseOwnerAdmin): form = PostAdminForm list_display = ['title', 'category',", "= 1 model = Post @admin.register(Category, site=custom_site) class CategoryAdmin(BaseOwnerAdmin): list_display = ['name', 'status','is_nav',", "from django.utils.html import format_html from django.core.urlresolvers import reverse from .adminforms import PostAdminForm from", "custom_site from django.utils.html import format_html from django.core.urlresolvers import reverse from .adminforms import PostAdminForm", "format_html from django.core.urlresolvers import reverse from .adminforms import PostAdminForm from typeidea.custom_admin import BaseOwnerAdmin", "from django.contrib import admin from .models import Post, Category, Tag from typeidea.custom_site import", "= ['title'] actions_on_top = True date_hierarchy = 'created_time' list_editable = ['title', ] #", "False # 优化显示结果 list_filter = ['title'] actions_on_top = True date_hierarchy = 'created_time' list_editable", "list_display = ['name', 'status','is_nav', 'created_time'] inlines = [PostInlineAdmin,] fields = ('name', 'status', 'is_nav',)", "= ['title', 'category__name', 'owner__first_name'] save_on_top = False show_full_result_count = False # 优化显示结果 list_filter", "= Post @admin.register(Category, site=custom_site) class CategoryAdmin(BaseOwnerAdmin): list_display = ['name', 'status','is_nav', 'created_time'] inlines =", "{ 'fields': (('category', 'title'), 'desc', 'status', 'content') }), ('高级配置', { 'classes': ('collapse', 'addon'),", "['title', 'category__name', 'owner__first_name'] save_on_top = False show_full_result_count = False # 优化显示结果 list_filter =", "'操作' operator.empty_value_display = '???' class PostInlineAdmin(admin.TabularInline): fields = ('title', 'status') extra = 1", "from django.core.urlresolvers import reverse from .adminforms import PostAdminForm from typeidea.custom_admin import BaseOwnerAdmin #", "= '操作' operator.empty_value_display = '???' class PostInlineAdmin(admin.TabularInline): fields = ('title', 'status') extra =", "= ['title', 'category', 'status', 'owner', 'created_time', 'operator'] list_display_links = ['category', 'status'] search_fields =", "'owner__first_name'] save_on_top = False show_full_result_count = False # 优化显示结果 list_filter = ['title'] actions_on_top", "'created_time' list_editable = ['title', ] # 编辑页面 fieldsets = ( # 跟fields互斥 ('基础配置',", "fields = ('title', 'status') extra = 1 model = Post @admin.register(Category, site=custom_site) class", "= 'created_time' list_editable = ['title', ] # 编辑页面 fieldsets = ( # 跟fields互斥", "args=(obj.id,)) ) operator.show_description = '操作' operator.empty_value_display = '???' class PostInlineAdmin(admin.TabularInline): fields = ('title',", "= ['category', 'status'] search_fields = ['title', 'category__name', 'owner__first_name'] save_on_top = False show_full_result_count =", "# Register your models here. 
@admin.register(Post, site=custom_site) class PostAdmin(BaseOwnerAdmin): form = PostAdminForm list_display", "save_on_top = False show_full_result_count = False # 优化显示结果 list_filter = ['title'] actions_on_top =", "search_fields = ['title', 'category__name', 'owner__first_name'] save_on_top = False show_full_result_count = False # 优化显示结果", "布局作用 filter_horizontal = ('tag',) def operator(self, obj): return format_html( '<a href=\"{}\">编辑</a>', reverse('cus_site:blog_post_change', args=(obj.id,))", "False show_full_result_count = False # 优化显示结果 list_filter = ['title'] actions_on_top = True date_hierarchy", "['title'] actions_on_top = True date_hierarchy = 'created_time' list_editable = ['title', ] # 编辑页面", "}), ('高级配置', { 'classes': ('collapse', 'addon'), 'fields': ('tag',), }), ) # 布局作用 filter_horizontal", "admin from .models import Post, Category, Tag from typeidea.custom_site import custom_site from django.utils.html", ".adminforms import PostAdminForm from typeidea.custom_admin import BaseOwnerAdmin # Register your models here. @admin.register(Post,", "unicode_literals from django.contrib import admin from .models import Post, Category, Tag from typeidea.custom_site", "['category', 'status'] search_fields = ['title', 'category__name', 'owner__first_name'] save_on_top = False show_full_result_count = False", "PostInlineAdmin(admin.TabularInline): fields = ('title', 'status') extra = 1 model = Post @admin.register(Category, site=custom_site)", ") operator.show_description = '操作' operator.empty_value_display = '???' class PostInlineAdmin(admin.TabularInline): fields = ('title', 'status')", "import Post, Category, Tag from typeidea.custom_site import custom_site from django.utils.html import format_html from", "'status'] search_fields = ['title', 'category__name', 'owner__first_name'] save_on_top = False show_full_result_count = False #", "1 model = Post @admin.register(Category, site=custom_site) class CategoryAdmin(BaseOwnerAdmin): list_display = ['name', 'status','is_nav', 'created_time']", "list_display = ['title', 'category', 'status', 'owner', 'created_time', 'operator'] list_display_links = ['category', 'status'] search_fields", "extra = 1 model = Post @admin.register(Category, site=custom_site) class CategoryAdmin(BaseOwnerAdmin): list_display = ['name',", "site=custom_site) class CategoryAdmin(BaseOwnerAdmin): list_display = ['name', 'status','is_nav', 'created_time'] inlines = [PostInlineAdmin,] fields =", "import BaseOwnerAdmin # Register your models here. 
@admin.register(Post, site=custom_site) class PostAdmin(BaseOwnerAdmin): form =", "# 编辑页面 fieldsets = ( # 跟fields互斥 ('基础配置', { 'fields': (('category', 'title'), 'desc',", "'status') extra = 1 model = Post @admin.register(Category, site=custom_site) class CategoryAdmin(BaseOwnerAdmin): list_display =", "import unicode_literals from django.contrib import admin from .models import Post, Category, Tag from", "class PostAdmin(BaseOwnerAdmin): form = PostAdminForm list_display = ['title', 'category', 'status', 'owner', 'created_time', 'operator']", "@admin.register(Category, site=custom_site) class CategoryAdmin(BaseOwnerAdmin): list_display = ['name', 'status','is_nav', 'created_time'] inlines = [PostInlineAdmin,] fields", "['title', 'category', 'status', 'owner', 'created_time', 'operator'] list_display_links = ['category', 'status'] search_fields = ['title',", "('name', 'status', 'is_nav',) @admin.register(Tag, site=custom_site) class TagAdmin(BaseOwnerAdmin): list_display = ['name', 'status', 'owner', 'created_time']", "format_html( '<a href=\"{}\">编辑</a>', reverse('cus_site:blog_post_change', args=(obj.id,)) ) operator.show_description = '操作' operator.empty_value_display = '???' class", "}), ) # 布局作用 filter_horizontal = ('tag',) def operator(self, obj): return format_html( '<a", "django.utils.html import format_html from django.core.urlresolvers import reverse from .adminforms import PostAdminForm from typeidea.custom_admin", "django.core.urlresolvers import reverse from .adminforms import PostAdminForm from typeidea.custom_admin import BaseOwnerAdmin # Register", "form = PostAdminForm list_display = ['title', 'category', 'status', 'owner', 'created_time', 'operator'] list_display_links =", "utf-8 -*- from __future__ import unicode_literals from django.contrib import admin from .models import", "('高级配置', { 'classes': ('collapse', 'addon'), 'fields': ('tag',), }), ) # 布局作用 filter_horizontal =", "reverse from .adminforms import PostAdminForm from typeidea.custom_admin import BaseOwnerAdmin # Register your models", "PostAdmin(BaseOwnerAdmin): form = PostAdminForm list_display = ['title', 'category', 'status', 'owner', 'created_time', 'operator'] list_display_links", "typeidea.custom_admin import BaseOwnerAdmin # Register your models here. @admin.register(Post, site=custom_site) class PostAdmin(BaseOwnerAdmin): form", ".models import Post, Category, Tag from typeidea.custom_site import custom_site from django.utils.html import format_html", "# 布局作用 filter_horizontal = ('tag',) def operator(self, obj): return format_html( '<a href=\"{}\">编辑</a>', reverse('cus_site:blog_post_change',", "PostAdminForm list_display = ['title', 'category', 'status', 'owner', 'created_time', 'operator'] list_display_links = ['category', 'status']", "here. 
@admin.register(Post, site=custom_site) class PostAdmin(BaseOwnerAdmin): form = PostAdminForm list_display = ['title', 'category', 'status',", "] # 编辑页面 fieldsets = ( # 跟fields互斥 ('基础配置', { 'fields': (('category', 'title'),", "跟fields互斥 ('基础配置', { 'fields': (('category', 'title'), 'desc', 'status', 'content') }), ('高级配置', { 'classes':", "typeidea.custom_site import custom_site from django.utils.html import format_html from django.core.urlresolvers import reverse from .adminforms", "'category__name', 'owner__first_name'] save_on_top = False show_full_result_count = False # 优化显示结果 list_filter = ['title']", "filter_horizontal = ('tag',) def operator(self, obj): return format_html( '<a href=\"{}\">编辑</a>', reverse('cus_site:blog_post_change', args=(obj.id,)) )", "from __future__ import unicode_literals from django.contrib import admin from .models import Post, Category,", "= [PostInlineAdmin,] fields = ('name', 'status', 'is_nav',) @admin.register(Tag, site=custom_site) class TagAdmin(BaseOwnerAdmin): list_display =", "list_display_links = ['category', 'status'] search_fields = ['title', 'category__name', 'owner__first_name'] save_on_top = False show_full_result_count", "= PostAdminForm list_display = ['title', 'category', 'status', 'owner', 'created_time', 'operator'] list_display_links = ['category',", "-*- coding: utf-8 -*- from __future__ import unicode_literals from django.contrib import admin from", "'category', 'status', 'owner', 'created_time', 'operator'] list_display_links = ['category', 'status'] search_fields = ['title', 'category__name',", "('tag',) def operator(self, obj): return format_html( '<a href=\"{}\">编辑</a>', reverse('cus_site:blog_post_change', args=(obj.id,)) ) operator.show_description =", "operator.empty_value_display = '???' class PostInlineAdmin(admin.TabularInline): fields = ('title', 'status') extra = 1 model", "= '???' class PostInlineAdmin(admin.TabularInline): fields = ('title', 'status') extra = 1 model =", "PostAdminForm from typeidea.custom_admin import BaseOwnerAdmin # Register your models here. 
@admin.register(Post, site=custom_site) class", "('基础配置', { 'fields': (('category', 'title'), 'desc', 'status', 'content') }), ('高级配置', { 'classes': ('collapse',", "('collapse', 'addon'), 'fields': ('tag',), }), ) # 布局作用 filter_horizontal = ('tag',) def operator(self,", "True date_hierarchy = 'created_time' list_editable = ['title', ] # 编辑页面 fieldsets = (", "'classes': ('collapse', 'addon'), 'fields': ('tag',), }), ) # 布局作用 filter_horizontal = ('tag',) def", "from .adminforms import PostAdminForm from typeidea.custom_admin import BaseOwnerAdmin # Register your models here.", "@admin.register(Post, site=custom_site) class PostAdmin(BaseOwnerAdmin): form = PostAdminForm list_display = ['title', 'category', 'status', 'owner',", "from typeidea.custom_site import custom_site from django.utils.html import format_html from django.core.urlresolvers import reverse from", "site=custom_site) class PostAdmin(BaseOwnerAdmin): form = PostAdminForm list_display = ['title', 'category', 'status', 'owner', 'created_time',", "'desc', 'status', 'content') }), ('高级配置', { 'classes': ('collapse', 'addon'), 'fields': ('tag',), }), )", "('tag',), }), ) # 布局作用 filter_horizontal = ('tag',) def operator(self, obj): return format_html(", "Post @admin.register(Category, site=custom_site) class CategoryAdmin(BaseOwnerAdmin): list_display = ['name', 'status','is_nav', 'created_time'] inlines = [PostInlineAdmin,]", "'status','is_nav', 'created_time'] inlines = [PostInlineAdmin,] fields = ('name', 'status', 'is_nav',) @admin.register(Tag, site=custom_site) class", "import admin from .models import Post, Category, Tag from typeidea.custom_site import custom_site from", "import custom_site from django.utils.html import format_html from django.core.urlresolvers import reverse from .adminforms import", "'owner', 'created_time', 'operator'] list_display_links = ['category', 'status'] search_fields = ['title', 'category__name', 'owner__first_name'] save_on_top", "= ('tag',) def operator(self, obj): return format_html( '<a href=\"{}\">编辑</a>', reverse('cus_site:blog_post_change', args=(obj.id,)) ) operator.show_description", "list_editable = ['title', ] # 编辑页面 fieldsets = ( # 跟fields互斥 ('基础配置', {", "( # 跟fields互斥 ('基础配置', { 'fields': (('category', 'title'), 'desc', 'status', 'content') }), ('高级配置',", "优化显示结果 list_filter = ['title'] actions_on_top = True date_hierarchy = 'created_time' list_editable = ['title',", "from .models import Post, Category, Tag from typeidea.custom_site import custom_site from django.utils.html import", "['name', 'status','is_nav', 'created_time'] inlines = [PostInlineAdmin,] fields = ('name', 'status', 'is_nav',) @admin.register(Tag, site=custom_site)", "fields = ('name', 'status', 'is_nav',) @admin.register(Tag, site=custom_site) class TagAdmin(BaseOwnerAdmin): list_display = ['name', 'status',", "class PostInlineAdmin(admin.TabularInline): fields = ('title', 'status') extra = 1 model = Post @admin.register(Category,", "'created_time'] inlines = [PostInlineAdmin,] fields = ('name', 'status', 'is_nav',) @admin.register(Tag, site=custom_site) class TagAdmin(BaseOwnerAdmin):", "your models here. 
@admin.register(Post, site=custom_site) class PostAdmin(BaseOwnerAdmin): form = PostAdminForm list_display = ['title',", "fieldsets = ( # 跟fields互斥 ('基础配置', { 'fields': (('category', 'title'), 'desc', 'status', 'content')", "= ['name', 'status','is_nav', 'created_time'] inlines = [PostInlineAdmin,] fields = ('name', 'status', 'is_nav',) @admin.register(Tag,", "# 优化显示结果 list_filter = ['title'] actions_on_top = True date_hierarchy = 'created_time' list_editable =", "'fields': (('category', 'title'), 'desc', 'status', 'content') }), ('高级配置', { 'classes': ('collapse', 'addon'), 'fields':", "reverse('cus_site:blog_post_change', args=(obj.id,)) ) operator.show_description = '操作' operator.empty_value_display = '???' class PostInlineAdmin(admin.TabularInline): fields =", "{ 'classes': ('collapse', 'addon'), 'fields': ('tag',), }), ) # 布局作用 filter_horizontal = ('tag',)" ]
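Editor's note: this row fragments a Django admin module for a typeidea-style blog: a PostAdmin registered on a custom admin site with list_display, fieldsets, a filter_horizontal tag widget and a custom "operator" column, plus Category/Tag admins and a tabular Post inline. Two details worth flagging for anyone reassembling it: Django reads short_description (not show_description) from a list_display callable, so the label in the fragments would be silently ignored, and django.core.urlresolvers was removed in Django 2.0 in favour of django.urls. The sketch below shows only the custom-column pattern; it substitutes the stock admin.ModelAdmin for the project's BaseOwnerAdmin and is an illustration, not the dataset's exact code.

# Sketch of the "edit link" column pattern visible in the fragments above.
from django.contrib import admin
from django.urls import reverse  # modern path; the fragments use the pre-2.0 django.core.urlresolvers
from django.utils.html import format_html

from .models import Post
from typeidea.custom_site import custom_site


@admin.register(Post, site=custom_site)
class PostAdmin(admin.ModelAdmin):  # the project uses its own BaseOwnerAdmin here
    list_display = ['title', 'category', 'status', 'owner', 'created_time', 'operator']

    def operator(self, obj):
        # Render a link to this post's change page on the custom admin site.
        return format_html(
            '<a href="{}">edit</a>',
            reverse('cus_site:blog_post_change', args=(obj.id,)),
        )

    operator.short_description = 'operator'     # the attribute Django actually reads for the column header
    operator.empty_value_display = '???'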
[ "from torch import optim from src.sam import SAM __OPTIMIZERS__ = { \"AdaBelief\": AdaBelief,", "else: base_optimizer = optim.__getattribute__(base_optimizer_name) return SAM(model.parameters(), base_optimizer, **cfg.optimizer.param) if __OPTIMIZERS__.get(optimizer_name) is not None:", "cfg.optimizer.base if __OPTIMIZERS__.get(base_optimizer_name) is not None: base_optimizer = __OPTIMIZERS__[base_optimizer_name] else: base_optimizer = optim.__getattribute__(base_optimizer_name)", "base_optimizer, **cfg.optimizer.param) if __OPTIMIZERS__.get(optimizer_name) is not None: return __OPTIMIZERS__[optimizer_name](model.parameters(), **cfg.optimizer.param) else: return optim.__getattribute__(optimizer_name)(model.parameters(),", "\"SAM\": base_optimizer_name = cfg.optimizer.base if __OPTIMIZERS__.get(base_optimizer_name) is not None: base_optimizer = __OPTIMIZERS__[base_optimizer_name] else:", "cfg.optimizer.name if optimizer_name == \"SAM\": base_optimizer_name = cfg.optimizer.base if __OPTIMIZERS__.get(base_optimizer_name) is not None:", "SAM } def get_optimizer(cfg, model): optimizer_name = cfg.optimizer.name if optimizer_name == \"SAM\": base_optimizer_name", "def get_optimizer(cfg, model): optimizer_name = cfg.optimizer.name if optimizer_name == \"SAM\": base_optimizer_name = cfg.optimizer.base", "AdaBelief import torch_optimizer from torch import optim from src.sam import SAM __OPTIMIZERS__ =", "} def get_optimizer(cfg, model): optimizer_name = cfg.optimizer.name if optimizer_name == \"SAM\": base_optimizer_name =", "__OPTIMIZERS__[base_optimizer_name] else: base_optimizer = optim.__getattribute__(base_optimizer_name) return SAM(model.parameters(), base_optimizer, **cfg.optimizer.param) if __OPTIMIZERS__.get(optimizer_name) is not", "\"SAM\": SAM } def get_optimizer(cfg, model): optimizer_name = cfg.optimizer.name if optimizer_name == \"SAM\":", "SAM __OPTIMIZERS__ = { \"AdaBelief\": AdaBelief, \"RAdam\": torch_optimizer.RAdam, \"SAM\": SAM } def get_optimizer(cfg,", "src.sam import SAM __OPTIMIZERS__ = { \"AdaBelief\": AdaBelief, \"RAdam\": torch_optimizer.RAdam, \"SAM\": SAM }", "optimizer_name = cfg.optimizer.name if optimizer_name == \"SAM\": base_optimizer_name = cfg.optimizer.base if __OPTIMIZERS__.get(base_optimizer_name) is", "torch_optimizer.RAdam, \"SAM\": SAM } def get_optimizer(cfg, model): optimizer_name = cfg.optimizer.name if optimizer_name ==", "SAM(model.parameters(), base_optimizer, **cfg.optimizer.param) if __OPTIMIZERS__.get(optimizer_name) is not None: return __OPTIMIZERS__[optimizer_name](model.parameters(), **cfg.optimizer.param) else: return", "import torch_optimizer from torch import optim from src.sam import SAM __OPTIMIZERS__ = {", "**cfg.optimizer.param) if __OPTIMIZERS__.get(optimizer_name) is not None: return __OPTIMIZERS__[optimizer_name](model.parameters(), **cfg.optimizer.param) else: return optim.__getattribute__(optimizer_name)(model.parameters(), **cfg.optimizer.param)", "__OPTIMIZERS__ = { \"AdaBelief\": AdaBelief, \"RAdam\": torch_optimizer.RAdam, \"SAM\": SAM } def get_optimizer(cfg, model):", "= cfg.optimizer.name if optimizer_name == \"SAM\": base_optimizer_name = cfg.optimizer.base if __OPTIMIZERS__.get(base_optimizer_name) is not", "= cfg.optimizer.base if __OPTIMIZERS__.get(base_optimizer_name) is not None: base_optimizer = __OPTIMIZERS__[base_optimizer_name] else: base_optimizer =", "== \"SAM\": base_optimizer_name = cfg.optimizer.base if __OPTIMIZERS__.get(base_optimizer_name) is not None: base_optimizer = __OPTIMIZERS__[base_optimizer_name]", 
"base_optimizer_name = cfg.optimizer.base if __OPTIMIZERS__.get(base_optimizer_name) is not None: base_optimizer = __OPTIMIZERS__[base_optimizer_name] else: base_optimizer", "not None: base_optimizer = __OPTIMIZERS__[base_optimizer_name] else: base_optimizer = optim.__getattribute__(base_optimizer_name) return SAM(model.parameters(), base_optimizer, **cfg.optimizer.param)", "\"RAdam\": torch_optimizer.RAdam, \"SAM\": SAM } def get_optimizer(cfg, model): optimizer_name = cfg.optimizer.name if optimizer_name", "import optim from src.sam import SAM __OPTIMIZERS__ = { \"AdaBelief\": AdaBelief, \"RAdam\": torch_optimizer.RAdam,", "import AdaBelief import torch_optimizer from torch import optim from src.sam import SAM __OPTIMIZERS__", "from adabelief_pytorch import AdaBelief import torch_optimizer from torch import optim from src.sam import", "torch import optim from src.sam import SAM __OPTIMIZERS__ = { \"AdaBelief\": AdaBelief, \"RAdam\":", "get_optimizer(cfg, model): optimizer_name = cfg.optimizer.name if optimizer_name == \"SAM\": base_optimizer_name = cfg.optimizer.base if", "if __OPTIMIZERS__.get(base_optimizer_name) is not None: base_optimizer = __OPTIMIZERS__[base_optimizer_name] else: base_optimizer = optim.__getattribute__(base_optimizer_name) return", "optim.__getattribute__(base_optimizer_name) return SAM(model.parameters(), base_optimizer, **cfg.optimizer.param) if __OPTIMIZERS__.get(optimizer_name) is not None: return __OPTIMIZERS__[optimizer_name](model.parameters(), **cfg.optimizer.param)", "from src.sam import SAM __OPTIMIZERS__ = { \"AdaBelief\": AdaBelief, \"RAdam\": torch_optimizer.RAdam, \"SAM\": SAM", "optim from src.sam import SAM __OPTIMIZERS__ = { \"AdaBelief\": AdaBelief, \"RAdam\": torch_optimizer.RAdam, \"SAM\":", "return SAM(model.parameters(), base_optimizer, **cfg.optimizer.param) if __OPTIMIZERS__.get(optimizer_name) is not None: return __OPTIMIZERS__[optimizer_name](model.parameters(), **cfg.optimizer.param) else:", "\"AdaBelief\": AdaBelief, \"RAdam\": torch_optimizer.RAdam, \"SAM\": SAM } def get_optimizer(cfg, model): optimizer_name = cfg.optimizer.name", "torch_optimizer from torch import optim from src.sam import SAM __OPTIMIZERS__ = { \"AdaBelief\":", "import SAM __OPTIMIZERS__ = { \"AdaBelief\": AdaBelief, \"RAdam\": torch_optimizer.RAdam, \"SAM\": SAM } def", "base_optimizer = optim.__getattribute__(base_optimizer_name) return SAM(model.parameters(), base_optimizer, **cfg.optimizer.param) if __OPTIMIZERS__.get(optimizer_name) is not None: return", "= optim.__getattribute__(base_optimizer_name) return SAM(model.parameters(), base_optimizer, **cfg.optimizer.param) if __OPTIMIZERS__.get(optimizer_name) is not None: return __OPTIMIZERS__[optimizer_name](model.parameters(),", "= __OPTIMIZERS__[base_optimizer_name] else: base_optimizer = optim.__getattribute__(base_optimizer_name) return SAM(model.parameters(), base_optimizer, **cfg.optimizer.param) if __OPTIMIZERS__.get(optimizer_name) is", "= { \"AdaBelief\": AdaBelief, \"RAdam\": torch_optimizer.RAdam, \"SAM\": SAM } def get_optimizer(cfg, model): optimizer_name", "model): optimizer_name = cfg.optimizer.name if optimizer_name == \"SAM\": base_optimizer_name = cfg.optimizer.base if __OPTIMIZERS__.get(base_optimizer_name)", "__OPTIMIZERS__.get(base_optimizer_name) is not None: base_optimizer = __OPTIMIZERS__[base_optimizer_name] else: base_optimizer = optim.__getattribute__(base_optimizer_name) return SAM(model.parameters(),", "is not None: base_optimizer = __OPTIMIZERS__[base_optimizer_name] else: 
base_optimizer = optim.__getattribute__(base_optimizer_name) return SAM(model.parameters(), base_optimizer,", "base_optimizer = __OPTIMIZERS__[base_optimizer_name] else: base_optimizer = optim.__getattribute__(base_optimizer_name) return SAM(model.parameters(), base_optimizer, **cfg.optimizer.param) if __OPTIMIZERS__.get(optimizer_name)", "if optimizer_name == \"SAM\": base_optimizer_name = cfg.optimizer.base if __OPTIMIZERS__.get(base_optimizer_name) is not None: base_optimizer", "adabelief_pytorch import AdaBelief import torch_optimizer from torch import optim from src.sam import SAM", "AdaBelief, \"RAdam\": torch_optimizer.RAdam, \"SAM\": SAM } def get_optimizer(cfg, model): optimizer_name = cfg.optimizer.name if", "None: base_optimizer = __OPTIMIZERS__[base_optimizer_name] else: base_optimizer = optim.__getattribute__(base_optimizer_name) return SAM(model.parameters(), base_optimizer, **cfg.optimizer.param) if", "{ \"AdaBelief\": AdaBelief, \"RAdam\": torch_optimizer.RAdam, \"SAM\": SAM } def get_optimizer(cfg, model): optimizer_name =", "optimizer_name == \"SAM\": base_optimizer_name = cfg.optimizer.base if __OPTIMIZERS__.get(base_optimizer_name) is not None: base_optimizer =" ]
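Editor's note: the overlapping n-grams in this row cover a complete optimizer factory, so it can be stitched back together with reasonable confidence. It maps config names to optimizer classes (AdaBelief, RAdam via torch_optimizer, and a project-local SAM wrapper) and, for SAM, resolves a base optimizer class first. The reassembly below adds comments only; src.sam, the cfg.optimizer fields and the exact SAM signature are taken on trust from the fragments, and getattr(optim, name) would be the more idiomatic spelling of optim.__getattribute__(name).

# Reassembled from the n-grams above (imports and structure as they appear in the row).
from adabelief_pytorch import AdaBelief
import torch_optimizer
from torch import optim

from src.sam import SAM  # project-local sharpness-aware minimization wrapper


__OPTIMIZERS__ = {
    "AdaBelief": AdaBelief,
    "RAdam": torch_optimizer.RAdam,
    "SAM": SAM,
}


def get_optimizer(cfg, model):
    optimizer_name = cfg.optimizer.name
    if optimizer_name == "SAM":
        # SAM wraps a base optimizer class; look it up in the registry, else in torch.optim.
        base_optimizer_name = cfg.optimizer.base
        if __OPTIMIZERS__.get(base_optimizer_name) is not None:
            base_optimizer = __OPTIMIZERS__[base_optimizer_name]
        else:
            base_optimizer = optim.__getattribute__(base_optimizer_name)
        return SAM(model.parameters(), base_optimizer, **cfg.optimizer.param)
    # Non-SAM case: instantiate directly from the registry or from torch.optim.
    if __OPTIMIZERS__.get(optimizer_name) is not None:
        return __OPTIMIZERS__[optimizer_name](model.parameters(), **cfg.optimizer.param)
    else:
        return optim.__getattribute__(optimizer_name)(model.parameters(), **cfg.optimizer.param)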
[ "self.target_map }) else: return sess.run(operation, feed_dict={ NET.input: [feed], NET.label_ph: self.original_label, self.target_map_ph: self.target_map })", "\"intersection\": top2 = self.run_model(self.sess, self.NET.top_idx, test_image_pert, self.NET) criterion = float(len(np.intersect1d(self.topK, top2))) / self.k_top", "allowed perturbation size equal to {}\" # .format(epsilon)) # return 1., 1., self.original_confidence,", "name='beta_1') self.create_attack_ops(NET2, test_image.shape[-3], test_image.shape[-2]) self.update_new_image(test_image, original_label) def update_new_image(self, test_image, original_label, target_map=None): w, h,", "counterfactuals, self.NET).astype(int) criterion = -np.linalg.norm(self.mass_center1 - center2) elif measure == \"cosine\": saliency2_flatten =", "image, NET): \"\"\" If the network's prediction is incorrect in the first place,", "np.max(predicted_scores) return False def create_extra_ops(self, NET, w, h, k_top): top_val, NET.top_idx = tf.nn.top_k(NET.saliency_flatten,", "1] = 116.779 MEAN_IMAGE[:, :, :, 2] = 123.68 EPSILON = 1e-12 MIN_INPUT", "shape=[2], name='mass_center_ph') self.target_map_ph = tf.placeholder(tf.float32, shape=[w, h], name='target_map_ph') self.original_output = self.NET.predict(test_image[None, :]) _,", "iters=100, alpha=1, beta_0=1e11, beta_1=1e6, measure=\"intersection\", target=None): \"\"\" Args: attack_method: One of \"mass_center\", \"topK\"", "== \"mass_center\": center2 = self.run_model(self.sess, self.NET.mass_center, counterfactuals, self.NET).astype(int) criterion = -np.linalg.norm(self.mass_center1 - center2)", "d_norm = np.linalg.norm(d.flatten(), ord=ord) if d_norm > bound: proj_ratio = bound / np.linalg.norm(d.flatten(),", "[1, 2, 3] ]): raise ValueError( \"Model's input dimensions is not Compatible with", "self.run_model\\ (self.sess, [self.NET.saliency, self.NET.top_idx, self.NET.mass_center], self.perturbed_image, self.NET) correlation = scipy.stats.spearmanr( self.saliency1_flatten, np.reshape(self.saliency2, [w", "= self.elements1 self.mass_center1 = self.run_model(self.sess, self.NET.mass_center, counterfactuals, self.NET).astype(int) self.original_mass_center = self.mass_center1 def check_prediction(self,", "saliency2_flatten = self.run_model(self.sess, self.NET.saliency_flatten, test_image_pert, self.NET) criterion = scipy.spatial.distance.cosine( self.saliency1_flatten, saliency2_flatten) else: raise", "counterfactuals, self.NET).astype(int) self.original_mass_center = self.mass_center1 def check_prediction(self, sess, original_label, image, NET): \"\"\" If", "name='target_map_ph') self.beta_0_ph = tf.placeholder(tf.float32, name='beta_0') self.beta_1_ph = tf.placeholder(tf.float32, name='beta_1') self.original_output = self.NET.predict(test_image[None, :])", "replaced by softplus function (necessary only when the activation function of the original", "The prediction confidence of the perturbed image \"\"\" self.beta_0 = beta_0 self.beta_1 =", "== 4 else [image]}) if np.argmax(predicted_scores, 1) != original_label: print(\"Network's Prediction is Already", "np.max(predicted_scores) self.saliency2, self.top2, self.mass_center2= self.run_model\\ (self.sess, [self.NET.saliency, self.NET.top_idx, self.NET.mass_center], self.perturbed_image, self.NET) correlation =", "confidence, perturb_size, cos_distance class IntegratedGradientsAttack(object): def __init__(self, sess, mean_image, test_image, original_label, NET, NET2=None,", "+ alpha * pert - self.test_image d_norm = np.linalg.norm(d.flatten(), 
ord=ord) if d_norm >", "alpha * np.sign(pert) - self.test_image d_norm = np.linalg.norm(d.flatten(), ord=ord) if d_norm > bound:", "measure == \"correlation\": saliency2_flatten = self.run_model(self.sess, self.NET.saliency_flatten, counterfactuals, self.NET) criterion = scipy.stats.spearmanr(self.saliency1_flatten, saliency2_flatten)[0]", "maximum allowed perturbation size equal to {}\" # .format(epsilon)) # return 1., 1.,", "else: raise ValueError(\"Invalid measure!\") return criterion else: return 1 def iterative_attack(self, attack_method, epsilon,", "if self.check_prediction(sess, original_label, test_image, NET): return self.sess = sess self.target_map = target_map self.create_extra_ops(NET,", "tf.stack([ tf.reduce_sum(NET.saliency * x_mesh) / (w * h), tf.reduce_sum(NET.saliency * y_mesh) / (w", "correlation between saliency maps of original and perturbed image center_dislocation: The L2 distance", "[feed], NET.reference_image: self.reference_image, NET.label_ph: self.original_label, self.target_map_ph: self.target_map }) elif len(feed.shape) == 4: if", "}) else: return sess.run(operation, feed_dict={ NET.input: [feed], NET.reference_image: self.reference_image, NET.label_ph: self.original_label, self.target_map_ph: self.target_map", "Incorrect!\") return True else: self.original_confidence = np.max(predicted_scores) return False def create_extra_ops(self, NET, w,", "self.original_label, self.topk_ph: self.original_topk, self.mass_center_ph: self.original_mass_center }) else: return sess.run(operation, feed_dict={ NET.input: feed, NET.label_ph:", "c]) elif attack_method == \"mass_center\": perturbation = self.run_model(self.sess, self.NET2.mass_center_direction, counterfactuals, self.NET2) perturbation =", "NET.input: [feed], NET.label_ph: self.original_label, self.topk_ph: self.original_topk, self.mass_center_ph: self.original_mass_center, self.target_map_ph: self.target_map }) else: return", "hasattr(self, \"use_target\") and self.use_target: return sess.run(operation, feed_dict={ NET.input: feed, NET.label_ph: self.original_label, self.topk_ph: self.original_topk,", "Incorrect!\") return True else: self.original_confidence = np.max(predicted_scores) return False def update_new_image(self, test_image, original_label,", "in range(self.num_steps)]),0) return np.sign(perturbation_summed) def apply_perturb(self, in_image, pert, alpha, bound=8 / 255, ord=np.inf):", "maximum allowed attack iterations alpha: perturbation size in each iteration of the attack", "h, c = self.mean_image.shape self.topk_ph = tf.placeholder(tf.float32, shape=[w * h], name='topk_ph') self.mass_center_ph =", "test_image_pert, self.NET) criterion = scipy.spatial.distance.cosine( self.saliency1_flatten, saliency2_flatten) else: raise ValueError(\"Invalid measure!\") return criterion", "X, y.astype(int), labels class SimpleGradientAttack(object): def __init__(self, mean_image, sess, test_image, original_label, NET, NET2=None,", "center_dislocation = np.linalg.norm(self.mass_center1 - self.mass_center2.astype(int)) cos_distance = scipy.spatial.distance.cosine( self.saliency1_flatten, np.reshape(self.saliency2, [w * h]))", "self.original_output, self.target_map_ph: self.target_map }) else: return sess.run(operation, feed_dict={ NET.input: [feed], NET.label_ph: self.original_label, self.topk_ph:", "np.array([ in_image + np.random.normal(scale=0.1 * (in_image.max() - in_image.min()), size=in_image.shape) for _ in range(self.num_steps)", "self.beta_0 = beta_0 self.beta_1 = beta_1 w, h, c = self.test_image.shape 
def dataReader():
    X = np.zeros((100, 227, 227, 3))
    y = np.zeros(100)
    for num in range(4):
        # assumption: the per-chunk pickles are opened in binary mode and unpickled with _pickle
        with open("./ImagenetValidationSamples/imagenet_sample_{}.pkl".format(num), "rb") as f:
            dic_temp = pkl.load(f)
        X[num * 20:num * 20 + 20] = dic_temp["X"]
        y[num * 20:num * 20 + 20] = dic_temp["y"]
        labels = dic_temp["labels"]
    return X, y.astype(int), labels
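# A minimal sketch of how the constants above are meant to be used: the attacked
# networks consume mean-subtracted images, so a valid (possibly perturbed) input
# must stay inside [MIN_INPUT, MAX_INPUT]. The helper names `to_network_range`
# and `clip_to_valid_range` are hypothetical, introduced only for illustration.
def to_network_range(raw_image_uint8):
    # raw_image_uint8: a (227, 227, 3) array with values in [0, 255]
    return raw_image_uint8.astype(np.float32) - MEAN_IMAGE[0]


def clip_to_valid_range(image):
    # keep a mean-subtracted image inside the valid pixel range
    return np.clip(image, MIN_INPUT[0], MAX_INPUT[0])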
class SimpleGradientAttack(object):

    def __init__(self, mean_image, sess, test_image, original_label, NET, NET2=None,
                 k_top=1000, target_map=None, pixel_max=255.):
        """
        Args:
            mean_image: The mean image of the data set (the assumption is that the
                images are mean subtracted).
            sess: Session containing the model's (and surrogate model's) graphs.
            test_image: Mean subtracted test image.
            original_label: True label of the image.
            NET: Original neural network. It's assumed that NET.saliency is the
                saliency map tensor and NET.saliency_flatten is its flatten version.
            NET2: Surrogate neural network with the same structure and weights of
                the original network but with activations replaced by the softplus
                function (necessary only when the activation function of the
                original network does not have second order gradients, ex: ReLU).
                It's assumed that NET2.saliency is the saliency map tensor and
                NET2.saliency_flatten is its flatten version.
            k_top: the topK parameter of the attack (refer to the original paper).
            pixel_max: the maximum pixel value in the image.
        """
        self.pixel_max = pixel_max
        if len(test_image.shape) != 3:
            raise ValueError("Invalid Test Image Dimensions")
        if sum([NET.input.get_shape()[-i] != test_image.shape[-i] for i in [1, 2, 3]]):
            raise ValueError("Model's input dimensions are not compatible with the provided test image!")
        if self.check_prediction(sess, original_label, test_image, NET):
            return
        self.sess = sess
        self.target_map = target_map
        self.create_extra_ops(NET, test_image.shape[-3], test_image.shape[-2], k_top)
        if NET2 is None:
            NET2 = NET
        else:
            self.create_extra_ops(NET2, test_image.shape[-3], test_image.shape[-2], k_top)
            if sum([NET2.input.get_shape()[-i] != test_image.shape[-i] for i in [1, 2, 3]]):
                raise ValueError("Surrogate model's input dimensions are not compatible with the provided test image!")
        self.NET = NET
        self.NET2 = NET2
        self.test_image = test_image
        self.original_label = original_label
        self.mean_image = mean_image
        self.k_top = k_top
        w, h, c = self.mean_image.shape
        self.topk_ph = tf.placeholder(tf.float32, shape=[w * h], name='topk_ph')
        self.mass_center_ph = tf.placeholder(tf.float32, shape=[2], name='mass_center_ph')
        self.target_map_ph = tf.placeholder(tf.float32, shape=[w, h], name='target_map_ph')
        self.beta_0_ph = tf.placeholder(tf.float32, name='beta_0')
        self.beta_1_ph = tf.placeholder(tf.float32, name='beta_1')
        self.original_output = self.NET.predict(test_image[None, :])
        _, num_class = self.original_output.shape
        self.original_output_ph = tf.placeholder(
            tf.float32, shape=[None, num_class],
            name='original_output_ph')  # only for the manipulation (target) attack
        self.create_attack_ops(NET2, test_image.shape[-3], test_image.shape[-2])
        self.update_new_image(test_image, original_label)

    def check_prediction(self, sess, original_label, image, NET):
        """If the network's prediction is already incorrect, attacking has no meaning."""
        predicted_scores = sess.run(
            NET.output,
            feed_dict={NET.input: image if len(image.shape) == 4 else [image]})
        if np.argmax(predicted_scores, 1) != original_label:
            print("Network's Prediction is Already Incorrect!")
            return True
        else:
            self.original_confidence = np.max(predicted_scores)
            return False

    def update_new_image(self, test_image, original_label, target_map=None):
        w, h, c = test_image.shape
        self.test_image = test_image
        self.original_label = original_label
        assert self.check_prediction(self.sess, original_label, test_image, self.NET) == False
        if target_map is not None:
            self.target_map = target_map
        self.original_output = self.NET2.predict(test_image[None, :])
        self.saliency1, self.topK = self.run_model(
            self.sess, [self.NET.saliency, self.NET.top_idx], self.test_image, self.NET)
        self.saliency1_flatten = np.reshape(self.saliency1, [w * h])
        elem1 = np.argsort(np.reshape(self.saliency1, [w * h]))[-self.k_top:]
        self.elements1 = np.zeros(w * h)
        self.elements1[elem1] = 1
        self.original_topk = self.elements1
        self.mass_center1 = self.run_model(
            self.sess, self.NET.mass_center, self.test_image, self.NET).astype(int)
        self.original_mass_center = self.mass_center1

    def create_extra_ops(self, NET, w, h, k_top):
        top_val, NET.top_idx = tf.nn.top_k(NET.saliency_flatten, k_top)
        y_mesh, x_mesh = np.meshgrid(np.arange(h), np.arange(w))
        # "mass center" of the saliency map: saliency-weighted average coordinate
        NET.mass_center = tf.stack([
            tf.reduce_sum(NET.saliency * x_mesh) / (w * h),
            tf.reduce_sum(NET.saliency * y_mesh) / (w * h)])

    def create_attack_ops(self, NET, w, h):
        topK_loss = tf.reduce_sum((NET.saliency_flatten * self.topk_ph))
        self.topK_direction = -tf.gradients(topK_loss, NET.input)[0]
        mass_center_loss = -tf.reduce_sum((NET.mass_center - self.mass_center_ph) ** 2)
        self.mass_center_direction = -tf.gradients(mass_center_loss, NET.input)[0]
        if self.target_map is not None:
            target_dis = tf.keras.losses.MSE(self.target_map_ph, NET.saliency)
            output_dis = tf.keras.losses.MSE(self.original_output_ph, NET.output)
            target_loss = tf.reduce_mean(target_dis) * self.beta_0_ph + \
                self.beta_1_ph * tf.reduce_mean(output_dis)
            self.debug = target_loss
            self.target_direction = -tf.gradients(target_loss, NET.input)[0]

    def run_model(self, sess, operation, feed, NET):
        # condensed equivalent of the per-case feed_dict construction: dispatch on the
        # feed shape and on the attributes that have been set so far
        if len(feed.shape) == 3:
            feed = [feed]
        elif len(feed.shape) != 4:
            raise RuntimeError("Input image shape invalid!")
        feed_dict = {NET.input: feed, NET.label_ph: self.original_label}
        if hasattr(self, "original_topk") and hasattr(self, "original_mass_center"):
            feed_dict[self.topk_ph] = self.original_topk
            feed_dict[self.mass_center_ph] = self.original_mass_center
            if hasattr(self, "use_target") and self.use_target:
                feed_dict.update({self.beta_0_ph: self.beta_0,
                                  self.beta_1_ph: self.beta_1,
                                  self.original_output_ph: self.original_output,
                                  self.target_map_ph: self.target_map})
        return sess.run(operation, feed_dict=feed_dict)

    def give_simple_perturbation(self, attack_method, in_image):
        w, h, c = self.test_image.shape
        if attack_method == "random":
            perturbation = np.random.normal(size=(w, h, c))
        elif attack_method == "topK":
            perturbation = self.run_model(self.sess, self.topK_direction, in_image, self.NET2)
            perturbation = np.reshape(perturbation, [w, h, c])
        elif attack_method == "mass_center":
            perturbation = self.run_model(self.sess, self.mass_center_direction, in_image, self.NET2)
            perturbation = np.reshape(perturbation, [w, h, c])
        elif attack_method == "target":
            self.use_target = True
            if self.target_map is None:
                raise ValueError("No target region determined!")
            perturbation = self.run_model(self.sess, self.target_direction, in_image, self.NET2)
            debug = self.run_model(self.sess, self.debug, in_image, self.NET2)
            print("MSE: ", debug)
            perturbation = np.reshape(perturbation, [w, h, c])
        return np.sign(perturbation)

    def apply_perturb(self, in_image, pert, alpha, bound=8 / 255, ord=np.inf):
        if self.mean_image is None:
            self.mean_image = np.zeros_like(in_image)
        d = in_image + alpha * np.sign(pert) - self.test_image
        d_norm = np.linalg.norm(d.flatten(), ord=ord)
        proj_ratio = bound / d_norm if d_norm > bound else 1
        out_image = self.test_image + d * proj_ratio
        out_image = np.clip(out_image, -self.mean_image, self.pixel_max - self.mean_image)
        return out_image

    def check_measure(self, test_image_pert, measure):
        prob = self.run_model(self.sess, self.NET.output, test_image_pert, self.NET)
        if np.argmax(prob, 1) != self.original_label:
            return 1.
        if measure == "intersection":
            top2 = self.run_model(self.sess, self.NET.top_idx, test_image_pert, self.NET)
            criterion = float(len(np.intersect1d(self.topK, top2))) / self.k_top
        elif measure == "correlation":
            saliency2_flatten = self.run_model(
                self.sess, self.NET.saliency_flatten, test_image_pert, self.NET)
            criterion = scipy.stats.spearmanr(self.saliency1_flatten, saliency2_flatten)[0]
        elif measure == "mass_center":
            center2 = self.run_model(
                self.sess, self.NET.mass_center, test_image_pert, self.NET).astype(int)
            criterion = -np.linalg.norm(self.mass_center1 - center2)
        elif measure == "cosine":
            saliency2_flatten = self.run_model(
                self.sess, self.NET.saliency_flatten, test_image_pert, self.NET)
            criterion = scipy.spatial.distance.cosine(self.saliency1_flatten, saliency2_flatten)
        else:
            raise ValueError("Invalid measure!")
        return criterion

    def iterative_attack(self, attack_method, epsilon, iters=100, alpha=1,
                         beta_0=1e11, beta_1=1e6, measure="intersection", target=None):
        """
        Args:
            attack_method: One of "mass_center", "topK", "random" or "target".
            epsilon: Allowed maximum $ell_infty$ norm of the perturbation, e.g. 8.
            iters: number of maximum allowed attack iterations.
            alpha: perturbation size in each iteration of the attack.
            beta_0, beta_1: parameters for the manipulate (target) attack.
            measure: measure for success of the attack (one of "correlation",
                "mass_center", "intersection" or "cosine").
        Returns:
            intersection: The portion of the top K salient pixels of the original
                picture that remain in the top K salient pixels of the perturbed image.
            correlation: The rank correlation between the saliency maps of the
                original and perturbed image.
            center_dislocation: The L2 distance between the saliency map mass centers
                of the original and perturbed images.
            confidence: The prediction confidence of the perturbed image.
            perturb_size: The largest absolute pixel change that was applied.
            cos_distance: Cosine distance between the two flattened saliency maps.
        """
        self.beta_0 = beta_0
        self.beta_1 = beta_1
        self.use_target = (attack_method == "target")
        w, h, c = self.test_image.shape
        test_image_pert = self.test_image.copy()
        self.original = self.test_image.copy()
        min_criterion = 1.
        perturb_size = 0.
        for counter in range(iters):
            pert = self.give_simple_perturbation(attack_method, test_image_pert)
            test_image_pert = self.apply_perturb(test_image_pert, pert, alpha, epsilon)
            criterion = self.check_measure(test_image_pert, measure)
            if criterion < min_criterion:
                min_criterion = criterion
                self.perturbed_image = test_image_pert.copy()
                perturb_size = np.max(np.abs(self.test_image - self.perturbed_image))
        if min_criterion == 1.:
            # the attack was not successful for the maximum allowed perturbation size
            return None
        predicted_scores = self.run_model(self.sess, self.NET.output, self.perturbed_image, self.NET)
        confidence = np.max(predicted_scores)
        self.saliency2, self.top2, self.mass_center2 = self.run_model(
            self.sess, [self.NET.saliency, self.NET.top_idx, self.NET.mass_center],
            self.perturbed_image, self.NET)
        correlation = scipy.stats.spearmanr(
            self.saliency1_flatten, np.reshape(self.saliency2, [w * h]))[0]
        intersection = float(len(np.intersect1d(self.topK, self.top2))) / self.k_top
        center_dislocation = np.linalg.norm(self.mass_center1 - self.mass_center2.astype(int))
        cos_distance = scipy.spatial.distance.cosine(
            self.saliency1_flatten, np.reshape(self.saliency2, [w * h]))
        return intersection, correlation, center_dislocation, confidence, perturb_size, cos_distance
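# A minimal NumPy-only sketch of the iteration pattern shared by the attack classes
# in this module: take the sign of a gradient direction, rescale the accumulated
# displacement so its largest entry stays within `bound`, and clip back to the valid
# pixel range. `toy_saliency_grad` is a hypothetical stand-in for the session-evaluated
# topK / mass-center / target directions; nothing here depends on TensorFlow.
def toy_saliency_grad(image):
    # placeholder direction for illustration only: push every pixel toward zero
    return -image


def linf_step(test_image, current_image, grad, alpha=1., bound=8.,
              pixel_min=0., pixel_max=255.):
    d = current_image + alpha * np.sign(grad) - test_image   # total displacement so far
    d_norm = np.linalg.norm(d.flatten(), ord=np.inf)
    proj_ratio = bound / d_norm if d_norm > bound else 1.    # keep max |d| <= bound
    return np.clip(test_image + d * proj_ratio, pixel_min, pixel_max)


def toy_iterative_attack(test_image, iters=10, alpha=1., bound=8.):
    x = test_image.copy()
    for _ in range(iters):
        x = linf_step(test_image, x, toy_saliency_grad(x), alpha=alpha, bound=bound)
    return x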
class IntegratedGradientsAttack(object):

    def __init__(self, sess, mean_image, test_image, original_label, NET, NET2=None,
                 k_top=1000, num_steps=100, reference_image=None, target_map=None,
                 pixel_max=255.):
        """
        Args: as in SimpleGradientAttack, plus
            num_steps: Number of steps in the Integrated Gradients Algorithm.
            reference_image: Mean subtracted reference image of the Integrated
                Gradients Algorithm (an all-zero image is used when None).
            pixel_max: the maximum pixel value in the image.
        """
        # input validation, placeholder creation, create_extra_ops / create_attack_ops
        # and update_new_image are set up as in SimpleGradientAttack.__init__;
        # the Integrated-Gradients specific state is:
        self.num_steps = num_steps
        self.reference_image = (np.zeros_like(test_image)
                                if reference_image is None else reference_image)

    def create_counterfactuals(self, in_image):
        # images interpolated between the reference image and the input
        ref_subtracted = in_image - self.reference_image
        counterfactuals = np.array(
            [(float(i + 1) / self.num_steps) * ref_subtracted + self.reference_image
             for i in range(self.num_steps)])
        return np.array(counterfactuals)

    def give_simple_perturbation(self, attack_method, in_image):
        counterfactuals = self.create_counterfactuals(in_image)
        w, h, c = self.test_image.shape
        if attack_method == "random":
            perturbation = np.random.normal(size=(self.num_steps, w, h, c))
        elif attack_method == "topK":
            perturbation = self.run_model(
                self.sess, self.NET2.topK_direction, counterfactuals, self.NET2)
            perturbation = np.reshape(perturbation, [self.num_steps, w, h, c])
        elif attack_method == "mass_center":
            perturbation = self.run_model(
                self.sess, self.NET2.mass_center_direction, counterfactuals, self.NET2)
            perturbation = np.reshape(perturbation, [self.num_steps, w, h, c])
        elif attack_method == "target":
            self.use_target = True
            if self.target_map is None:
                raise ValueError("No target region determined!")
            perturbation = self.run_model(
                self.sess, self.target_direction, counterfactuals, self.NET2)
            perturbation = np.reshape(perturbation, [self.num_steps, w, h, c])
        # later steps on the interpolation path receive proportionally larger weights
        perturbation_summed = np.sum(np.array(
            [float(i + 1) / self.num_steps * perturbation[i]
             for i in range(self.num_steps)]), 0)
        return np.sign(perturbation_summed)

    # check_prediction, create_extra_ops, create_attack_ops (which attaches
    # NET.topK_direction / NET.mass_center_direction to the surrogate graph),
    # run_model (which additionally feeds NET.reference_image), apply_perturb,
    # check_measure (evaluated on the counterfactuals) and iterative_attack follow
    # the same pattern as SimpleGradientAttack above.
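# A minimal NumPy sketch of the Integrated-Gradients-style aggregation used above:
# gradients are evaluated on images interpolated between a reference image and the
# input, and later steps on the path get proportionally larger weights before the
# sign is taken. `grad_fn` is a hypothetical callable standing in for the
# session-evaluated attack direction.
def ig_counterfactuals(image, reference, num_steps=100):
    ref_subtracted = image - reference
    return np.array([(float(i + 1) / num_steps) * ref_subtracted + reference
                     for i in range(num_steps)])


def ig_attack_direction(image, reference, grad_fn, num_steps=100):
    path = ig_counterfactuals(image, reference, num_steps)
    grads = np.array([grad_fn(x) for x in path])
    weighted = np.sum(np.array([float(i + 1) / num_steps * grads[i]
                                for i in range(num_steps)]), 0)
    return np.sign(weighted)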
d =", "original_label self.mean_image = mean_image self.k_top = k_top self.num_steps = num_steps self.reference_image = np.zeros_like(", "h]))[0] intersection = float(len(np.intersect1d(self.topK, self.top2))) / self.k_top center_dislocation = np.linalg.norm(self.mass_center1 - self.mass_center2.astype(int)) cos_distance", "[self.NET.saliency, self.NET.top_idx], counterfactuals, self.NET) self.saliency1_flatten = np.reshape( self.saliency1, [test_image.shape[-3] * test_image.shape[-2]]) elem1 =", "num_class = self.original_output.shape self.original_output_ph = tf.placeholder( tf.float32, shape=[None, num_class], name='original_output_ph') # only for", "self.original_label = original_label assert self.check_prediction(self.sess, original_label, test_image, self.NET) == False if target_map is", "/ 255, ord=np.inf): if self.mean_image is None: self.mean_image = np.zeros_like(in_image) # out_image =", "region determined!\") else: perturbation = self.run_model(self.sess, self.target_direction, counterfactuals, self.NET2) perturbation = np.reshape(perturbation, [self.num_steps,", "= np.reshape(perturbation, [self.num_steps, w, h, c]) perturbation_summed = np.sum(np.array([float(i+1)/self.num_steps*perturbation[i]\\ for i in range(self.num_steps)]),0)", "return None predicted_scores = self.run_model(self.sess, self.NET.output, self.perturbed_image, self.NET) confidence = np.max(predicted_scores) counterfactuals =", "self.original_label: counterfactuals = self.create_counterfactuals(test_image_pert) if measure == \"intersection\": top2 = self.run_model(self.sess, self.NET.top_idx, counterfactuals,", "# only for the manipulation attack self.beta_0_ph = tf.placeholder(tf.float32, name='beta_0') self.beta_1_ph = tf.placeholder(tf.float32,", "= tf.reduce_sum((NET.saliency_flatten * self.topk_ph)) self.topK_direction = -tf.gradients(topK_loss, NET.input)[0] mass_center_loss = -tf.reduce_sum( (NET.mass_center -", "sess, test_image, original_label, NET, NET2=None, k_top=1000, target_map=None, pixel_max=255.): \"\"\" Args: mean_image: The mean", "ValueError(\"No target region determined!\") else: perturbation = self.run_model(self.sess, self.target_direction, in_image, self.NET2) debug =", "equal to {} # '''.format(epsilon, # np.max(np.abs(self.test_image - self.perturbed_image)))) predicted_scores = self.run_model(self.sess, self.NET.output,", "}) else: raise RuntimeError(\"Input image shape invalid!\") def give_simple_perturbation(self, attack_method, in_image): w, h,", "np.reshape(perturbation, [self.num_steps, w, h, c]) perturbation_summed = np.sum(np.array([float(i+1)/self.num_steps*perturbation[i]\\ for i in range(self.num_steps)]),0) return", "y_mesh) / (w * h) ]) def create_attack_ops(self, NET, w, h): topK_loss =", "attack_method == \"mass_center\": perturbation = self.run_model(self.sess, self.mass_center_direction, in_image, self.NET2) perturbation = np.reshape(perturbation, [w,", "self.debug = target_loss self.target_direction = -tf.gradients(target_loss, NET.input)[0] def create_counterfactuals(self, in_image): ref_subtracted = in_image", "print(\"Label: \", original_label) return True else: self.original_confidence = np.max(predicted_scores) return False def create_extra_ops(self,", "in [1, 2, 3] ]): raise ValueError( \"Surrogate model's input dimensions is not", "the topK parameter of the attack (refer to the original paper) pixel_max: the", "else: return sess.run(operation, feed_dict={ NET.input: feed, NET.reference_image: self.reference_image, NET.label_ph: self.original_label, }) else: 
raise", "in the top K salient pixels of the perturbed image devided correlation: The", "-tf.gradients(mass_center_loss, NET.input)[0] if self.target_map is not None: target_dis = tf.keras.losses.MSE(self.target_map_ph, NET.saliency) output_dis =", "self.NET2) debug = self.run_model(self.sess, self.debug, in_image, self.NET2) print(\"MSE: \", debug) perturbation = np.reshape(perturbation,", "None for counter in range(iters): pert = self.give_simple_perturbation(attack_method, test_image_pert) test_image_pert = self.apply_perturb(test_image_pert, pert,", "227, 3)) y = np.zeros(100) for num in range(4): with open( \"./ImagenetValidationSamples/imagenet_sample_{}.pkl\".format( num),", "NET.label_ph: self.original_label, self.topk_ph: self.original_topk, self.mass_center_ph: self.original_mass_center, self.target_map_ph: self.target_map }) else: return sess.run(operation, feed_dict={", "= self.run_model(self.sess, self.NET.mass_center, self.test_image, self.NET).astype(int) self.original_mass_center = self.mass_center1 def check_prediction(self, sess, original_label, image,", "target_map self.original_output = self.NET2.predict(test_image[None, :]) counterfactuals = self.create_counterfactuals(test_image) self.saliency1, self.topK = self.run_model( self.sess,", "attack_method == \"topK\": perturbation = self.run_model(self.sess, self.NET2.topK_direction, counterfactuals, self.NET2) perturbation = np.reshape(perturbation, [self.num_steps,", "+ np.random.normal(scale=0.1 * (in_image.max() - in_image.min()), size=in_image.shape) for _ in range(self.num_steps) ]) return", "config_gpu = tf.ConfigProto() config_gpu.gpu_options.allow_growth = True MEAN_IMAGE = np.zeros((1, 227, 227, 3)).astype(np.float32) MEAN_IMAGE[:,", "* h) ]) def create_attack_ops(self, NET, w, h): topK_loss = tf.reduce_sum((NET.saliency_flatten * self.topk_ph))", "c = self.test_image.shape if attack_method == \"random\": perturbation = np.random.normal(size=(self.num_steps, w, h, c))", "== False if target_map is not None: self.target_map = target_map self.original_output = self.NET2.predict(test_image[None,", "as plt from pylab import rcParams import scipy import scipy.stats as stats from", "= self.run_model( self.sess, [self.NET.saliency, self.NET.top_idx], counterfactuals, self.NET) self.saliency1_flatten = np.reshape( self.saliency1, [test_image.shape[-3] *", "pert = self.give_simple_perturbation(attack_method, test_image_pert) test_image_pert = self.apply_perturb(test_image_pert, pert, alpha, epsilon) criterion = self.check_measure(test_image_pert,", "matplotlib.pyplot as plt from pylab import rcParams import scipy import scipy.stats as stats", "self.original_topk = self.elements1 self.mass_center1 = self.run_model(self.sess, self.NET.mass_center, self.test_image, self.NET).astype(int) self.original_mass_center = self.mass_center1 def", "neural network with the same structure and weights of the orignal network but", "self.mass_center1 = self.run_model(self.sess, self.NET.mass_center, counterfactuals, self.NET).astype(int) self.original_mass_center = self.mass_center1 def create_extra_ops(self, NET, w,", "no meaning.\"\"\" predicted_scores = sess.run( NET.output, feed_dict={NET.input: image if len(image.shape) == 4 else", "= test_image self.original_label = original_label assert self.check_prediction(self.sess, original_label, test_image, self.NET) == False if", "when the activation function of the original function does not have second order", "-tf.gradients(topK_loss, NET.input)[0] mass_center_loss = -tf.reduce_sum( 
(NET.mass_center - self.mass_center_ph)**2) NET.mass_center_direction = -tf.gradients(mass_center_loss, NET.input)[0] if", "\"random\": perturbation = np.random.normal(size=(w, h, c)) elif attack_method == \"topK\": perturbation = self.run_model(self.sess,", "self.NET2.predict(test_image[None, :]) counterfactuals = self.create_counterfactuals(test_image) self.saliency1, self.topK = self.run_model( self.sess, [self.NET.saliency, self.NET.top_idx], counterfactuals,", "test_image.shape[-2]) self.update_new_image(test_image, original_label) def update_new_image(self, test_image, original_label, target_map=None): w, h, c = test_image.shape", "criterion = self.check_measure(test_image_pert, measure) if criterion < min_criterion: # print(\"attack\") min_criterion = criterion", "to the original paper) pixel_max: the maximum pixel value in the image. \"\"\"", "tf.placeholder( tf.float32, shape=[None, num_class], name='original_output_ph') # only for the manipulation attack self.beta_0_ph =", "sess.run(operation, feed_dict={ NET.input: [feed], NET.label_ph: self.original_label, self.target_map_ph: self.target_map }) elif len(feed.shape) == 4:", "= original_label self.mean_image = mean_image self.k_top = k_top w, h, c = self.mean_image.shape", "manipulate (target) attack beta_1: parameter for manipulate (target) attack Returns: intersection: The portion", "is Already Incorrect!\") return True else: self.original_confidence = np.max(predicted_scores) return False def create_extra_ops(self,", "None: self.target_map = target_map self.original_output = self.NET2.predict(test_image[None, :]) self.saliency1, self.topK = self.run_model( self.sess,", "num_steps=100, reference_image=None, target_map=None, pixel_max=255.): \"\"\" Args: mean_image: The mean image of the data", "self.check_measure(test_image_pert, measure) if criterion < min_criterion: min_criterion = criterion self.perturbed_image = test_image_pert.copy() perturb_size", "self.NET2) perturbation = np.reshape(perturbation, [self.num_steps, w, h, c]) elif attack_method == \"mass_center\": perturbation", "flatten version. 
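# A minimal NumPy sketch of the SmoothGrad-style averaging used by
# SmoothGradientsAttack: the attack direction is the sign of the mean gradient over
# noisy copies of the input. `grad_fn` is again a hypothetical stand-in for the
# session-evaluated direction.
def smoothgrad_attack_direction(image, grad_fn, num_steps=100, noise_ratio=0.1):
    scale = noise_ratio * (image.max() - image.min())
    noisy = np.array([image + np.random.normal(scale=scale, size=image.shape)
                      for _ in range(num_steps)])
    grads = np.array([grad_fn(x) for x in noisy])
    return np.sign(np.mean(grads, 0))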
class UniGradientsAttack(SmoothGradientsAttack):

    def __init__(self, sess, mean_image, test_image, original_label, NET, NET2=None,
                 k_top=1000, num_steps=100, radii=4, reference_image=None,
                 target_map=None, pixel_max=255.):
        self.radii = radii / (255. / pixel_max)
        super(UniGradientsAttack, self).__init__(
            sess, mean_image, test_image, original_label, NET, NET2=NET2, k_top=1000,
            num_steps=num_steps, reference_image=reference_image,
            target_map=target_map, pixel_max=255.)

    def create_counterfactuals(self, in_image):
        # uniform noise of radius `radii` instead of Gaussian noise
        counterfactuals = np.array([
            in_image + np.random.uniform(-1, 1, size=in_image.shape) * self.radii
            for _ in range(self.num_steps)])
        return np.array(counterfactuals)
/ pixel_max) super(UniGradientsAttack, self).__init__(sess,", "3] ]): raise ValueError( \"Model's input dimensions is not Compatible with the provided", "= 1e-12 MIN_INPUT = -MEAN_IMAGE MAX_INPUT = 255 * np.ones_like(MEAN_IMAGE).astype(np.float32) - MEAN_IMAGE def", "else [image]}) if np.argmax(predicted_scores, 1) != original_label: print(\"Network's Prediction is Already Incorrect!\") print(\"Pred:", "feed, NET): if len(feed.shape) == 3: if hasattr(self, \"original_topk\") and hasattr( self, \"original_mass_center\"):", "region determined!\") else: perturbation = self.run_model(self.sess, self.target_direction, in_image, self.NET2) debug = self.run_model(self.sess, self.debug,", "!= 3: raise ValueError(\"Invalid Test Image Dimensions\") if sum([ NET.input.get_shape()[-i] != test_image.shape[-i] for", "range(self.num_steps)]),0) return np.sign(perturbation_summed) def apply_perturb(self, in_image, pert, alpha, bound=8 / 255, ord=np.inf): if", "config_gpu.gpu_options.allow_growth = True MEAN_IMAGE = np.zeros((1, 227, 227, 3)).astype(np.float32) MEAN_IMAGE[:, :, :, 0]", "counterfactuals = self.create_counterfactuals(self.perturbed_image) self.saliency2, self.top2, self.mass_center2= self.run_model\\ (self.sess, [self.NET.saliency, self.NET.top_idx, self.NET.mass_center], counterfactuals, self.NET)", "y_mesh, x_mesh = np.meshgrid(np.arange(h), np.arange(w)) NET.mass_center = tf.stack([ tf.reduce_sum(NET.saliency * x_mesh) / (w", "NET.label_ph: self.original_label, self.topk_ph: self.original_topk, self.mass_center_ph: self.original_mass_center }) else: return sess.run(operation, feed_dict={ NET.input: [feed],", "self.run_model(self.sess, self.topK_direction, in_image, self.NET2) perturbation = np.reshape(perturbation, [w, h, c]) elif attack_method ==", "dimensions is not Compatible with the provided test image!\" ) self.NET = NET", "self.beta_0_ph: self.beta_0, self.beta_1_ph: self.beta_1, self.original_output_ph: self.original_output, self.target_map_ph: self.target_map }) else: return sess.run(operation, feed_dict={", "only when the activation function of the original function does not have second", "self.debug = topK_loss NET.topK_direction = -tf.gradients(topK_loss, NET.input)[0] mass_center_loss = -tf.reduce_sum( (NET.mass_center - self.mass_center_ph)**2)", "counterfactuals, self.NET2) perturbation = np.reshape(perturbation, [self.num_steps, w, h, c]) perturbation_summed = np.sum(np.array([float(i+1)/self.num_steps*perturbation[i]\\ for", "= self.run_model(self.sess, self.NET.top_idx, counterfactuals, self.NET) criterion = float(len(np.intersect1d(self.topK, top2))) / self.k_top elif measure", "int(iters / 5) == 0: # print(\"Iteration : {}\".format(counter)) pert = self.give_simple_perturbation(attack_method, test_image_pert)", "pixel value in the input image \"\"\" self.pixel_max = pixel_max if len(test_image.shape) !=", "y.astype(int), labels class SimpleGradientAttack(object): def __init__(self, mean_image, sess, test_image, original_label, NET, NET2=None, k_top=1000,", "* h]))[0] intersection = float(len(np.intersect1d(self.topK, self.top2))) / self.k_top center_dislocation = np.linalg.norm(self.mass_center1 - self.mass_center2.astype(int))", "else: return 1 def iterative_attack(self, attack_method, epsilon, iters=100, alpha=1, beta_0=1e11, beta_1=1e6, measure=\"intersection\"): \"\"\"", "equal to {}\" # .format(epsilon)) # return 1., 1., self.original_confidence, 0. 
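# UniGradientsAttack draws its counterfactual samples by adding uniform noise of
# radius `radii` to the input. A small numpy sketch of that sampling step; the
# function name below is illustrative, not part of the original class.
import numpy as np

def uniform_counterfactuals(image, radii, num_steps):
    """Return `num_steps` copies of `image`, each perturbed by i.i.d. uniform
    noise in [-radii, radii] per pixel."""
    return np.array([image + np.random.uniform(-1., 1., size=image.shape) * radii
                     for _ in range(num_steps)])

# e.g. 50 noisy copies of a 227x227x3 image with radius 4/255
_samples = uniform_counterfactuals(np.zeros((227, 227, 3)), radii=4 / 255., num_steps=50)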
return None", "perturbation = self.run_model(self.sess, self.topK_direction, in_image, self.NET2) perturbation = np.reshape(perturbation, [w, h, c]) elif", "self.test_image.copy() min_criterion = 1. perturb_size = 0. last_image = None for counter in", "- self.test_image, -bound, bound) d = in_image + alpha * np.sign(pert) - self.test_image", "self.NET) correlation = scipy.stats.spearmanr( self.saliency1_flatten, np.reshape(self.saliency2, [w * h]))[0] intersection = float(len(np.intersect1d(self.topK, self.top2)))", "original_label: True label of the image NET: Original neural network. It's assumed that", "not Compatible with the provided test image!\" ) self.NET = NET self.NET2 =", "2, 3] ]): raise ValueError( \"Model's input dimensions is not Compatible with the", "# print( # '''For maximum allowed perturbation size equal to {}, the resulting", "Compatible with the provided test image!\" ) self.NET = NET self.NET2 = NET2", "\"intersection\") beta_0: parameter for manipulate (target) attack beta_1: parameter for manipulate (target) attack", "= tf.placeholder(tf.float32, shape=[w, h], name='target_map_ph') self.beta_0_ph = tf.placeholder(tf.float32, name='beta_0') self.beta_1_ph = tf.placeholder(tf.float32, name='beta_1')", "[feed], NET.label_ph: self.original_label, self.topk_ph: self.original_topk, NET.reference_image: self.reference_image, self.mass_center_ph: self.original_mass_center, self.beta_0_ph: self.beta_0, self.beta_1_ph: self.beta_1,", "= -tf.gradients(target_loss, NET.input)[0] def create_counterfactuals(self, in_image, noise_ratio=0.1): counterfactuals = np.array([ in_image + np.random.normal(scale=0.1", "size was equal to {} # '''.format(epsilon, # np.max(np.abs(self.test_image - self.perturbed_image)))) predicted_scores =", "is the saliency map tensor and NET2.saliency_flatten is its flatten version. 
k_top: the", "original_label: print(\"Network's Prediction is Already Incorrect!\") print(\"Pred: \", np.argmax(predicted_scores, 1)) print(\"Label: \", original_label)", "input dimensions is not Compatible with the provided test image!\" ) self.NET =", "-self.mean_image, self.pixel_max - self.mean_image) return out_image def check_measure(self, test_image_pert, measure): prob = self.run_model(self.sess,", "predicted_scores = sess.run( NET.output, feed_dict={NET.input: image if len(image.shape) == 4 else [image]}) if", "NET.input: [feed], NET.label_ph: self.original_label, self.topk_ph: self.original_topk, NET.reference_image: self.reference_image, self.mass_center_ph: self.original_mass_center, self.beta_0_ph: self.beta_0, self.beta_1_ph:", "# if counter % int(iters / 5) == 0: # print(\"Iteration : {}\".format(counter))", "sess, original_label, image, NET): \"\"\" If the network's prediction is incorrect in the", "counterfactuals = np.array([ in_image + np.random.uniform(-1, 1, size=in_image.shape) * self.radii for _ in", "self.saliency1_flatten, np.reshape(self.saliency2, [w * h]))[0] intersection = float(len(np.intersect1d(self.topK, self.top2))) / self.k_top center_dislocation =", "{}, the resulting perturbation size was equal to {} # '''.format(epsilon, # np.max(np.abs(self.test_image", "h, c]) elif attack_method == \"mass_center\": perturbation = self.run_model(self.sess, self.mass_center_direction, in_image, self.NET2) perturbation", "test_image.shape[-3], test_image.shape[-2]) def check_prediction(self, sess, original_label, image, NET): \"\"\" If the network's prediction", "self.saliency1_flatten, np.reshape(self.saliency2, [w * h])) return intersection, correlation, center_dislocation, confidence, perturb_size, cos_distance class", "= self.test_image.copy() min_criterion = 1. for counter in range(iters): # if counter %", "* self.beta_0_ph + self.beta_1_ph * tf.reduce_mean( output_dis) self.debug = target_loss self.target_direction = -tf.gradients(target_loss,", "+ np.clip( # in_image + alpha * np.sign(pert) - self.test_image, -bound, bound) d", "NET.mass_center_direction = -tf.gradients(mass_center_loss, NET.input)[0] if self.target_map is not None: target_dis = tf.keras.losses.MSE(self.target_map_ph, NET.saliency)", "self.target_map_ph = tf.placeholder(tf.float32, shape=[w, h], name='target_map_ph') self.original_output = self.NET.predict(test_image[None, :]) _, num_class =", "criterion else: return 1. def iterative_attack(self, attack_method, epsilon, iters=100, alpha=1, beta_0=1e11, beta_1=1e6, measure=\"intersection\",", "to {}\" # .format(epsilon)) # return 1., 1., self.original_confidence, 0. 
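# The k_top parameter fixes how many of the most salient pixels are tracked. Elsewhere
# in this file the original top-K set is stored as a binary mask over the flattened
# saliency map; a numpy sketch of that bookkeeping (names are illustrative).
import numpy as np

def topk_mask(saliency, k_top):
    """Indices and binary mask of the k_top largest entries of a 2-D saliency map,
    both expressed over the flattened map."""
    flat = np.reshape(saliency, [-1])
    idx = np.argsort(flat)[-k_top:]   # positions of the K most salient pixels
    mask = np.zeros_like(flat)
    mask[idx] = 1.
    return idx, mask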
return None #", "20 + 20] = dic_temp[\"y\"] labels = dic_temp[\"labels\"] return X, y.astype(int), labels class", "attack self.create_attack_ops(self.NET2, test_image.shape[-3], test_image.shape[-2]) self.update_new_image(test_image, original_label) def update_new_image(self, test_image, original_label, target_map=None): w, h,", "structure and weights of the orignal network but with activations replaced by softplus", "shape=[w, h], name='target_map_ph') self.beta_0_ph = tf.placeholder(tf.float32, name='beta_0') self.beta_1_ph = tf.placeholder(tf.float32, name='beta_1') self.original_output =", "to {} # '''.format(epsilon, # np.max(np.abs(self.test_image - self.perturbed_image)))) predicted_scores = self.run_model(self.sess, self.NET.output, self.perturbed_image,", "None predicted_scores = self.run_model(self.sess, self.NET.output, self.perturbed_image, self.NET) confidence = np.max(predicted_scores) self.saliency2, self.top2, self.mass_center2=", "not None: target_dis = tf.keras.losses.MSE(self.target_map_ph, NET.saliency) output_dis = tf.keras.losses.MSE(self.original_output_ph, NET.output) target_loss = tf.reduce_mean(", "h, c = self.test_image.shape test_image_pert = self.test_image.copy() self.original = self.test_image.copy() if attack_method ==", "- self.perturbed_image)) else: pass if criterion == 1.: return None predicted_scores = self.run_model(self.sess,", "attack_method, epsilon, iters=100, alpha=1, beta_0=1e11, beta_1=1e6, measure=\"intersection\"): \"\"\" Args: attack_method: One of \"mass_center\",", "return sess.run(operation, feed_dict={ NET.input: feed, NET.label_ph: self.original_label, self.topk_ph: self.original_topk, self.mass_center_ph: self.original_mass_center, self.target_map_ph: self.target_map", "the data set(The assumption is that the images are mean subtracted) sess: Session", "else: return sess.run(operation, feed_dict={ NET.input: [feed], NET.label_ph: self.original_label, NET.reference_image: self.reference_image, self.topk_ph: self.original_topk, self.mass_center_ph:", "self.target_map = target_map self.create_extra_ops(NET, test_image.shape[-3], test_image.shape[-2], k_top) if NET2 is None: NET2 =", "in_image, self.NET2) debug = self.run_model(self.sess, self.debug, in_image, self.NET2) print(\"MSE: \", debug) perturbation =", "feed_dict={ NET.input: feed, NET.label_ph: self.original_label, }) else: raise RuntimeError(\"Input image shape invalid!\") def", "debug = self.run_model(self.sess, self.debug, in_image, self.NET2) print(\"MSE: \", debug) perturbation = np.reshape(perturbation, [w,", "dimensions is not Compatible with the provided test image!\" ) if self.check_prediction(sess, original_label,", "criterion < min_criterion: min_criterion = criterion self.perturbed_image = test_image_pert.copy() perturb_size = np.max( np.abs(self.test_image", "pixel value in the image. 
\"\"\" self.pixel_max = pixel_max if len(test_image.shape) != 3:", "= k_top w, h, c = self.mean_image.shape self.topk_ph = tf.placeholder(tf.float32, shape=[w * h],", "= tf.reduce_sum((NET.saliency_flatten * self.topk_ph)) self.debug = topK_loss NET.topK_direction = -tf.gradients(topK_loss, NET.input)[0] mass_center_loss =", "only for the manipulation attack self.create_attack_ops(self.NET2, test_image.shape[-3], test_image.shape[-2]) self.update_new_image(test_image, original_label) def update_new_image(self, test_image,", "attack (refer to the original paper) num_steps: Number of steps in Integrated Gradients", "self.test_image + np.clip( # in_image + alpha * np.sign(pert) - self.test_image, -bound, bound)", "test_image_pert, self.NET) if np.argmax(prob, 1) == self.original_label: counterfactuals = self.create_counterfactuals(test_image_pert) if measure ==", "attack self.create_attack_ops(self.NET2, test_image.shape[-3], test_image.shape[-2]) def check_prediction(self, sess, original_label, image, NET): \"\"\" If the", "'''.format(epsilon, # np.max(np.abs(self.test_image - self.perturbed_image)))) predicted_scores = self.run_model(self.sess, self.NET.output, self.perturbed_image, self.NET) confidence =", "= np.sum(np.array([float(i+1)/self.num_steps*perturbation[i]\\ for i in range(self.num_steps)]),0) return np.sign(perturbation_summed) def apply_perturb(self, in_image, pert, alpha,", "NET, NET2=None, k_top=1000, num_steps=100, reference_image=None, target_map=None, pixel_max=255.): \"\"\" Args: mean_image: The mean image", "in Integrated Gradients Algorithm reference_image: Mean subtracted reference image of Integrated Gradients Algorithm", "i in [1, 2, 3] ]): raise ValueError( \"Model's input dimensions is not", "NET2 is None: NET2 = NET else: self.create_extra_ops(NET2, test_image.shape[-3], test_image.shape[-2], k_top) if sum([", "= np.reshape(perturbation, [self.num_steps, w, h, c]) elif attack_method == \"target\": if self.target_map is", "\"topK\" or \"random\" epsilon: Allowed maximum $ell_infty$ of perturbations, eg:8 iters: number of", "NET2=None, k_top=1000, target_map=None, pixel_max=255.): \"\"\" Args: mean_image: The mean image of the data", "self.target_map }) else: return sess.run(operation, feed_dict={ NET.input: feed, NET.label_ph: self.original_label, self.topk_ph: self.original_topk, self.mass_center_ph:", "MAX_INPUT = 255 * np.ones_like(MEAN_IMAGE).astype(np.float32) - MEAN_IMAGE def dataReader(): X = np.zeros((100, 227,", "(NET.mass_center - self.mass_center_ph)**2) self.mass_center_direction = -tf.gradients(mass_center_loss, NET.input)[0] if self.target_map is not None: target_dis", "sess self.target_map = target_map self.create_extra_ops(NET, test_image.shape[-3], test_image.shape[-2], k_top) if NET2 is None: NET2", "beta_0=1e11, beta_1=1e6, measure=\"intersection\"): \"\"\" Args: attack_method: One of \"mass_center\", \"topK\" or \"random\" epsilon:", "NET.label_ph: self.original_label, }) else: raise RuntimeError(\"Input image shape invalid!\") def give_simple_perturbation(self, attack_method, in_image):", "= scipy.stats.spearmanr( self.saliency1_flatten, np.reshape(self.saliency2, [w * h]))[0] intersection = float(len(np.intersect1d(self.topK, self.top2))) / self.k_top", "self.use_target: return sess.run(operation, feed_dict={ NET.input: [feed], NET.label_ph: self.original_label, self.topk_ph: self.original_topk, self.mass_center_ph: self.original_mass_center, self.beta_0_ph:", "self.saliency2, self.top2, self.mass_center2= self.run_model\\ (self.sess, 
[self.NET.saliency, self.NET.top_idx, self.NET.mass_center], counterfactuals, self.NET) correlation = scipy.stats.spearmanr(", "# in_image + alpha * np.sign(pert) - self.test_image, -bound, bound) d = in_image", "used pixel_max: maximum pixel value in the input image \"\"\" self.pixel_max = pixel_max", "name='topk_ph') self.mass_center_ph = tf.placeholder(tf.float32, shape=[2], name='mass_center_ph') self.target_map_ph = tf.placeholder(tf.float32, shape=[w, h], name='target_map_ph') self.beta_0_ph", "perturbation = np.reshape(perturbation, [w, h, c]) return np.sign(perturbation) def apply_perturb(self, in_image, pert, alpha,", "np.array([(float(i+1)/self.num_steps) * ref_subtracted + self.reference_image\\ for i in range(self.num_steps)]) return np.array(counterfactuals) def run_model(self,", "measure!\") return criterion else: return 1 def iterative_attack(self, attack_method, epsilon, iters=100, alpha=1, beta_0=1e11,", ") if self.check_prediction(sess, original_label, test_image, NET): return self.sess = sess self.target_map = target_map", "self.mass_center_ph: self.original_mass_center }) else: return sess.run(operation, feed_dict={ NET.input: [feed], NET.label_ph: self.original_label, }) elif", "if hasattr(self, \"original_topk\") and hasattr( self, \"original_mass_center\"): if hasattr(self, \"use_target\") and self.use_target: return", "self.original_mass_center }) else: return sess.run(operation, feed_dict={ NET.input: feed, NET.label_ph: self.original_label, }) else: raise", "NET.top_idx = tf.nn.top_k(NET.saliency_flatten, k_top) y_mesh, x_mesh = np.meshgrid(np.arange(h), np.arange(w)) NET.mass_center = tf.stack([ tf.reduce_sum(NET.saliency", "mass centers in original and perturbed images confidence: The prediction confidence of the", "provided test image!\" ) if self.check_prediction(sess, original_label, test_image, NET): return self.sess = sess", "original paper) num_steps: Number of steps in Integrated Gradients Algorithm reference_image: not used", "tf.reduce_mean( output_dis) self.debug = target_loss self.target_direction = -tf.gradients(target_loss, NET.input)[0] def create_counterfactuals(self, in_image): ref_subtracted", "noise_ratio=0.1): counterfactuals = np.array([ in_image + np.random.normal(scale=0.1 * (in_image.max() - in_image.min()), size=in_image.shape) for", "}) else: return sess.run(operation, feed_dict={ NET.input: [feed], NET.label_ph: self.original_label, self.target_map_ph: self.target_map }) elif", "self.original_label, self.topk_ph: self.original_topk, self.mass_center_ph: self.original_mass_center, self.target_map_ph: self.target_map }) else: return sess.run(operation, feed_dict={ NET.input:", "import matplotlib.pyplot as plt from pylab import rcParams import scipy import scipy.stats as", "Returns: intersection: The portion of the top K salient pixels in the original", "__init__(self, mean_image, sess, test_image, original_label, NET, NET2=None, k_top=1000, target_map=None, pixel_max=255.): \"\"\" Args: mean_image:", "test_image, NET): return self.sess = sess self.target_map = target_map self.create_extra_ops(NET, test_image.shape[-3], test_image.shape[-2], k_top)", "tf.nn.top_k(NET.saliency_flatten, k_top) y_mesh, x_mesh = np.meshgrid(np.arange(h), np.arange(w)) NET.mass_center = tf.stack([ tf.reduce_sum(NET.saliency * x_mesh)", "test image original_label: True label of the image NET: Original neural network. It's", "ex: ReLU). 
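# The evaluation above compares the original and perturbed saliency maps with Spearman
# rank correlation, top-K intersection, mass-center dislocation and cosine distance.
# A hedged numpy/scipy sketch of those measures; note the original computes mass
# centers inside the TF graph, while the centroid below is a plain numpy stand-in
# normalized by the saliency sum.
import numpy as np
import scipy.stats
import scipy.spatial.distance

def saliency_similarity(sal1, sal2, k_top):
    """Compare two equally shaped 2-D saliency maps with the measures used here."""
    f1, f2 = sal1.ravel(), sal2.ravel()
    top1, top2 = np.argsort(f1)[-k_top:], np.argsort(f2)[-k_top:]
    intersection = len(np.intersect1d(top1, top2)) / float(k_top)
    correlation = scipy.stats.spearmanr(f1, f2)[0]
    cos_distance = scipy.spatial.distance.cosine(f1, f2)

    def mass_center(s):  # saliency-weighted centroid (numpy stand-in for NET.mass_center)
        x, y = np.meshgrid(np.arange(s.shape[1]), np.arange(s.shape[0]))
        total = s.sum() + 1e-12
        return np.array([(s * y).sum() / total, (s * x).sum() / total])

    center_dislocation = np.linalg.norm(mass_center(sal1) - mass_center(sal2))
    return intersection, correlation, center_dislocation, cos_distance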
It's assumed that NET.saliency is the saliency map tensor and NET2.saliency_flatten", "* self.topk_ph)) self.topK_direction = -tf.gradients(topK_loss, NET.input)[0] mass_center_loss = -tf.reduce_sum( (NET.mass_center - self.mass_center_ph)**2) self.mass_center_direction", "attack_method == \"mass_center\": perturbation = self.run_model(self.sess, self.NET2.mass_center_direction, counterfactuals, self.NET2) perturbation = np.reshape(perturbation, [self.num_steps,", "* 20 + 20] = dic_temp[\"y\"] labels = dic_temp[\"labels\"] return X, y.astype(int), labels", "allowed maximum $ell_infty$ of perturbations, eg:[2,4] iters: number of maximum allowed attack iterations", "return sess.run(operation, feed_dict={ NET.input: feed, NET.reference_image: self.reference_image, NET.label_ph: self.original_label, }) else: raise RuntimeError(\"Input", "np.sum(np.array([float(i+1)/self.num_steps*perturbation[i]\\ for i in range(self.num_steps)]),0) return np.sign(perturbation_summed) def apply_perturb(self, in_image, pert, alpha, bound=8", "original_label self.mean_image = mean_image self.k_top = k_top w, h, c = self.mean_image.shape self.topk_ph", "measure) if criterion < min_criterion: min_criterion = criterion self.perturbed_image = test_image_pert.copy() perturb_size =", "attack was not successfull for maximum allowed perturbation size equal to {}\" #", "perturbation = np.reshape(perturbation, [self.num_steps, w, h, c]) elif attack_method == \"target\": self.use_target =", "import scipy import scipy.stats as stats from tensorflow.python.ops import gen_nn_ops config_gpu = tf.ConfigProto()", "np.argsort(np.reshape(self.saliency1, [w * h]))[-self.k_top:] self.elements1 = np.zeros(w * h) self.elements1[elem1] = 1 self.original_topk", "self.reference_image, self.topk_ph: self.original_topk, self.mass_center_ph: self.original_mass_center, self.beta_0_ph: self.beta_0, self.beta_1_ph: self.beta_1, self.original_output_ph: self.original_output, self.target_map_ph: self.target_map", "else: self.create_extra_ops(NET2, test_image.shape[-3], test_image.shape[-2], k_top) if sum([ NET2.input.get_shape()[-i] != test_image.shape[-i] for i in", "If the network's prediction is incorrect in the first place, attacking has no", "= np.clip(out_image, -self.mean_image, self.pixel_max - self.mean_image) return out_image def check_measure(self, test_image_pert, measure): prob", "in_image + np.random.uniform(-1, 1, size=in_image.shape) * self.radii for _ in range(self.num_steps) ]) return", "alpha: perturbation size in each iteration of the attack measure: measure for success", "sess.run(operation, feed_dict={ NET.input: [feed], NET.label_ph: self.original_label, self.topk_ph: self.original_topk, NET.reference_image: self.reference_image, self.mass_center_ph: self.original_mass_center, self.beta_0_ph:", "return intersection, correlation, center_dislocation, confidence, perturb_size, cos_distance class IntegratedGradientsAttack(object): def __init__(self, sess, mean_image,", "self.topk_ph: self.original_topk, self.mass_center_ph: self.original_mass_center }) else: return sess.run(operation, feed_dict={ NET.input: feed, NET.label_ph: self.original_label,", "for i in [1, 2, 3] ]): raise ValueError( \"Surrogate model's input dimensions", "of \"mass_center\", \"topK\" or \"random\" epsilon: set of allowed maximum $ell_infty$ of perturbations,", "size equal to {}\" # .format(epsilon)) # return 1., 1., self.original_confidence, 0. 
return", "i in range(self.num_steps)]) return np.array(counterfactuals) def run_model(self, sess, operation, feed, NET): if len(feed.shape)", "self.mass_center_ph = tf.placeholder(tf.float32, shape=[2], name='mass_center_ph') self.target_map_ph = tf.placeholder(tf.float32, shape=[w, h], name='target_map_ph') self.beta_0_ph =", "the original paper) num_steps: Number of steps in Integrated Gradients Algorithm reference_image: not", "return 1., 1., self.original_confidence, 0. return None # print( # '''For maximum allowed", "self.NET) == False if target_map is not None: self.target_map = target_map self.original_output =", "self.NET.top_idx, self.NET.mass_center], counterfactuals, self.NET) correlation = scipy.stats.spearmanr( self.saliency1_flatten, np.reshape(self.saliency2, [w * h]))[0] intersection", "target_loss self.target_direction = -tf.gradients(target_loss, NET.input)[0] def run_model(self, sess, operation, feed, NET): if len(feed.shape)", "w, h, c = self.test_image.shape if attack_method == \"random\": perturbation = np.random.normal(size=(w, h,", "return 1. def iterative_attack(self, attack_method, epsilon, iters=100, alpha=1, beta_0=1e11, beta_1=1e6, measure=\"intersection\", target=None): \"\"\"", "return 1 def iterative_attack(self, attack_method, epsilon, iters=100, alpha=1, beta_0=1e11, beta_1=1e6, measure=\"intersection\"): \"\"\" Args:", "h, c = self.test_image.shape test_image_pert = self.test_image.copy() min_criterion = 1. for counter in", "containing model(and surrogate model's) graphs test_image: Mean subtracted test image original_label: True label", "pixel_max=255.) def create_counterfactuals(self, in_image): counterfactuals = np.array([ in_image + np.random.uniform(-1, 1, size=in_image.shape) *", "ValueError(\"Invalid Test Image Dimensions\") if sum([ NET.input.get_shape()[-i] != test_image.shape[-i] for i in [1,", "self.run_model(self.sess, self.NET.mass_center, counterfactuals, self.NET).astype(int) criterion = -np.linalg.norm(self.mass_center1 - center2) elif measure == \"cosine\":", "with activations replaced by softplus function (necessary only when the activation function of", "in_image): counterfactuals = np.array([ in_image + np.random.uniform(-1, 1, size=in_image.shape) * self.radii for _", "= tf.placeholder(tf.float32, shape=[2], name='mass_center_ph') self.target_map_ph = tf.placeholder(tf.float32, shape=[w, h], name='target_map_ph') self.original_output = self.NET.predict(test_image[None,", "self.NET = NET self.NET2 = NET2 self.test_image = test_image self.original_label = original_label self.mean_image", "center2 = self.run_model(self.sess, self.NET.mass_center, test_image_pert, self.NET).astype(int) criterion = -np.linalg.norm(self.mass_center1 - center2) elif measure", "self.mass_center_ph: self.original_mass_center, self.target_map_ph: self.target_map }) else: return sess.run(operation, feed_dict={ NET.input: feed, NET.reference_image: self.reference_image,", "= self.check_measure(test_image_pert, measure) if criterion < min_criterion: # print(\"attack\") min_criterion = criterion self.perturbed_image", "= beta_0 self.beta_1 = beta_1 w, h, c = self.test_image.shape test_image_pert = self.test_image.copy()", "self.reference_image counterfactuals = np.array([(float(i+1)/self.num_steps) * ref_subtracted + self.reference_image\\ for i in range(self.num_steps)]) return", "def dataReader(): X = np.zeros((100, 227, 227, 3)) y = np.zeros(100) for num", "sum([ NET.input.get_shape()[-i] != test_image.shape[-i] for i in [1, 2, 3] ]): raise ValueError(", "= 
tf.keras.losses.MSE(self.original_output_ph, NET.output) target_loss = tf.reduce_mean( target_dis) * self.beta_0_ph + self.beta_1_ph * tf.reduce_mean(", "elif measure == \"cosine\": saliency2_flatten = self.run_model(self.sess, self.NET.saliency_flatten, test_image_pert, self.NET) criterion = scipy.spatial.distance.cosine(", "self.run_model(self.sess, self.NET.output, test_image_pert, self.NET) if np.argmax(prob, 1) == self.original_label: counterfactuals = self.create_counterfactuals(test_image_pert) if", "= self.test_image.shape if attack_method == \"random\": perturbation = np.random.normal(size=(self.num_steps, w, h, c)) elif", "NET.input)[0] def create_counterfactuals(self, in_image, noise_ratio=0.1): counterfactuals = np.array([ in_image + np.random.normal(scale=0.1 * (in_image.max()", "EPSILON = 1e-12 MIN_INPUT = -MEAN_IMAGE MAX_INPUT = 255 * np.ones_like(MEAN_IMAGE).astype(np.float32) - MEAN_IMAGE", "/ self.k_top elif measure == \"correlation\": saliency2_flatten = self.run_model(self.sess, self.NET.saliency_flatten, counterfactuals, self.NET) criterion", "eg:8 iters: number of maximum allowed attack iterations alpha: perturbation size in each", "sess.run(operation, feed_dict={ NET.input: [feed], NET.label_ph: self.original_label, self.topk_ph: self.original_topk, self.mass_center_ph: self.original_mass_center, self.beta_0_ph: self.beta_0, self.beta_1_ph:", "NET, w, h): topK_loss = tf.reduce_sum((NET.saliency_flatten * self.topk_ph)) self.debug = topK_loss NET.topK_direction =", "\"use_target\") and self.use_target: return sess.run(operation, feed_dict={ NET.input: feed, NET.label_ph: self.original_label, NET.reference_image: self.reference_image, self.topk_ph:", "counterfactuals, self.NET2) perturbation = np.reshape(perturbation, [self.num_steps, w, h, c]) elif attack_method == \"mass_center\":", "bound=8 / 255, ord=np.inf): if self.mean_image is None: self.mean_image = np.zeros_like(in_image) # out_image", "attack_method: One of \"mass_center\", \"topK\" or \"random\" epsilon: Allowed maximum $ell_infty$ of perturbations,", "of original and perturbed image center_dislocation: The L2 distance between saliency map mass", "self.target_direction = -tf.gradients(target_loss, NET.input)[0] def create_counterfactuals(self, in_image): ref_subtracted = in_image - self.reference_image counterfactuals", "version. 
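# The line above assembles the targeted ("manipulate") attack loss: an MSE term pulling
# the saliency map toward a chosen target map, weighted by beta_0, plus an MSE term
# keeping the network output close to the original prediction, weighted by beta_1; the
# attack direction is the negative input-gradient of this combined loss. A self-contained
# sketch of the same construction with toy stand-ins, assuming the TensorFlow 1.x graph
# mode used throughout this file (all names below are illustrative).
import numpy as np
import tensorflow as tf

input_ph = tf.placeholder(tf.float32, shape=[1, 8, 8], name="input")
weights = tf.constant(np.random.RandomState(0).randn(64, 3).astype(np.float32))
logits = tf.matmul(tf.reshape(input_ph, [1, 64]), weights)
output = tf.nn.softmax(logits)
saliency = tf.reshape(tf.gradients(tf.reduce_max(logits), input_ph)[0] ** 2, [8, 8])

target_map_ph = tf.placeholder(tf.float32, shape=[8, 8], name="target_map")
original_output_ph = tf.placeholder(tf.float32, shape=[1, 3], name="original_output")
beta_0_ph = tf.placeholder(tf.float32, name="beta_0")
beta_1_ph = tf.placeholder(tf.float32, name="beta_1")

# beta_0 term moves the saliency toward the target region, beta_1 term keeps the prediction fixed
target_loss = (beta_0_ph * tf.reduce_mean(tf.keras.losses.MSE(target_map_ph, saliency))
               + beta_1_ph * tf.reduce_mean(tf.keras.losses.MSE(original_output_ph, output)))
target_direction = -tf.gradients(target_loss, input_ph)[0]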
NET2: Surrogate neural network with the same structure and weights of the", "'target': self.use_target = True else: self.use_target = False self.beta_0 = beta_0 self.beta_1 =", "name='original_output_ph') # only for the manipulation attack self.beta_0_ph = tf.placeholder(tf.float32, name='beta_0') self.beta_1_ph =", "target region determined!\") else: perturbation = self.run_model(self.sess, self.target_direction, counterfactuals, self.NET2) perturbation = np.reshape(perturbation,", "self.NET2) perturbation = np.reshape(perturbation, [w, h, c]) elif attack_method == \"target\": self.use_target =", "test_image_pert) test_image_pert = self.apply_perturb(test_image_pert, pert, alpha, epsilon) criterion = self.check_measure(test_image_pert, measure) if criterion", "def create_attack_ops(self, NET, w, h): topK_loss = tf.reduce_sum((NET.saliency_flatten * self.topk_ph)) self.debug = topK_loss", "determined!\") else: perturbation = self.run_model(self.sess, self.target_direction, in_image, self.NET2) debug = self.run_model(self.sess, self.debug, in_image,", "= self.run_model(self.sess, self.NET.saliency_flatten, counterfactuals, self.NET) criterion = scipy.stats.spearmanr(self.saliency1_flatten, saliency2_flatten)[0] elif measure == \"mass_center\":", "}) else: return sess.run(operation, feed_dict={ NET.input: [feed], NET.label_ph: self.original_label, self.topk_ph: self.original_topk, self.mass_center_ph: self.original_mass_center", "self.perturbed_image = test_image_pert.copy() perturb_size = np.max( np.abs(self.test_image - self.perturbed_image)) else: pass if criterion", "self.mass_center2.astype(int)) cos_distance = scipy.spatial.distance.cosine( self.saliency1_flatten, np.reshape(self.saliency2, [w * h])) return intersection, correlation, center_dislocation,", "topK_loss = tf.reduce_sum((NET.saliency_flatten * self.topk_ph)) self.debug = topK_loss NET.topK_direction = -tf.gradients(topK_loss, NET.input)[0] mass_center_loss", "name='beta_1') self.original_output = self.NET.predict(test_image[None, :]) _, num_class = self.original_output.shape self.original_output_ph = tf.placeholder( tf.float32,", "elif measure == \"mass_center\": center2 = self.run_model(self.sess, self.NET.mass_center, counterfactuals, self.NET).astype(int) criterion = -np.linalg.norm(self.mass_center1", "self.NET.predict(test_image[None, :]) _, num_class = self.original_output.shape self.original_output_ph = tf.placeholder( tf.float32, shape=[None, num_class], name='original_output_ph')", "in range(self.num_steps) ]) return np.array(counterfactuals) def run_model(self, sess, operation, feed, NET): if len(feed.shape)", "alpha * pert - self.test_image d_norm = np.linalg.norm(d.flatten(), ord=ord) if d_norm > bound:", "self.mean_image = mean_image self.k_top = k_top self.num_steps = num_steps self.reference_image = np.zeros_like( test_image)", "label of the image NET: Original neural network. It's assumed that NET.saliency is", "class UniGradientsAttack(SmoothGradientsAttack): def __init__(self, sess, mean_image, test_image, original_label, NET, NET2=None, k_top=1000, num_steps=100, radii=4,", "# return 1., 1., self.original_confidence, 0. 
return None # print( # '''For maximum", "\"Model's input dimensions is not Compatible with the provided test image!\" ) if", "self.beta_0, self.beta_1_ph: self.beta_1, self.original_output_ph: self.original_output, self.target_map_ph: self.target_map }) else: return sess.run(operation, feed_dict={ NET.input:", "perturbation size equal to {}, the resulting perturbation size was equal to {}", "h) ]) def create_attack_ops(self, NET, w, h): topK_loss = tf.reduce_sum((NET.saliency_flatten * self.topk_ph)) self.topK_direction", "test_image.shape[-2]) def check_prediction(self, sess, original_label, image, NET): \"\"\" If the network's prediction is", "Mean subtracted test image original_label: True label of the image NET: Original neural", "measure == \"cosine\": saliency2_flatten = self.run_model(self.sess, self.NET.saliency_flatten, test_image_pert, self.NET) criterion = scipy.spatial.distance.cosine( self.saliency1_flatten,", "elif attack_method == \"topK\": perturbation = self.run_model(self.sess, self.NET2.topK_direction, counterfactuals, self.NET2) perturbation = np.reshape(perturbation,", "= self.mass_center1 def check_prediction(self, sess, original_label, image, NET): \"\"\" If the network's prediction", "image of the data set(The assumption is that the images are mean subtracted)", "maximum pixel value in the input image \"\"\" self.pixel_max = pixel_max if len(test_image.shape)", "np.random.normal(size=(self.num_steps, w, h, c)) elif attack_method == \"topK\": perturbation = self.run_model(self.sess, self.NET2.topK_direction, counterfactuals,", "original_label) def update_new_image(self, test_image, original_label, target_map=None): w, h, c = test_image.shape self.test_image =", "= mean_image self.k_top = k_top w, h, c = self.mean_image.shape self.topk_ph = tf.placeholder(tf.float32,", "np.reshape(perturbation, [self.num_steps, w, h, c]) elif attack_method == \"mass_center\": perturbation = self.run_model(self.sess, self.NET2.mass_center_direction,", "SmoothGradientsAttack(object): def __init__(self, sess, mean_image, test_image, original_label, NET, NET2=None, k_top=1000, num_steps=100, reference_image=None, target_map=None,", "self.mass_center_ph: self.original_mass_center, self.target_map_ph: self.target_map }) else: return sess.run(operation, feed_dict={ NET.input: feed, NET.label_ph: self.original_label,", ":, :, 0] = 103.939 MEAN_IMAGE[:, :, :, 1] = 116.779 MEAN_IMAGE[:, :,", "hasattr(self, \"original_topk\") and hasattr( self, \"original_mass_center\"): if hasattr(self, \"use_target\") and self.use_target: return sess.run(operation,", "return sess.run(operation, feed_dict={ NET.input: [feed], NET.label_ph: self.original_label, }) elif len(feed.shape) == 4: if", "None else reference_image w, h, c = self.mean_image.shape self.topk_ph = tf.placeholder(tf.float32, shape=[w *", "1. 
def iterative_attack(self, attack_method, epsilon, iters=100, alpha=1, beta_0=1e11, beta_1=1e6, measure=\"intersection\"): \"\"\" Args: attack_method:", "self.NET).astype(int) criterion = -np.linalg.norm(self.mass_center1 - center2) elif measure == \"cosine\": saliency2_flatten = self.run_model(self.sess,", "[feed], NET.label_ph: self.original_label, NET.reference_image: self.reference_image, self.topk_ph: self.original_topk, self.mass_center_ph: self.original_mass_center, self.target_map_ph: self.target_map }) else:", "self.run_model(self.sess, self.NET.mass_center, counterfactuals, self.NET).astype(int) self.original_mass_center = self.mass_center1 def create_extra_ops(self, NET, w, h, k_top):", "- self.reference_image counterfactuals = np.array([(float(i+1)/self.num_steps) * ref_subtracted + self.reference_image\\ for i in range(self.num_steps)])", "ref_subtracted = in_image - self.reference_image counterfactuals = np.array([(float(i+1)/self.num_steps) * ref_subtracted + self.reference_image\\ for", "and hasattr( self, \"original_mass_center\"): if hasattr(self, \"use_target\") and self.use_target: return sess.run(operation, feed_dict={ NET.input:", "= self.run_model(self.sess, self.NET2.topK_direction, counterfactuals, self.NET2) perturbation = np.reshape(perturbation, [self.num_steps, w, h, c]) elif", "sum([ NET2.input.get_shape()[-i] != test_image.shape[-i] for i in [1, 2, 3] ]): raise ValueError(", "assert self.check_prediction(self.sess, original_label, test_image, self.NET) == False if target_map is not None: self.target_map", "attack_method == \"target\": if self.target_map is None: raise ValueError(\"No target region determined!\") else:", "return False def update_new_image(self, test_image, original_label, target_map=None): w, h, c = test_image.shape self.test_image", "_ in range(self.num_steps) ]) return np.array(counterfactuals) def run_model(self, sess, operation, feed, NET): if", "return True else: self.original_confidence = np.max(predicted_scores) return False def update_new_image(self, test_image, original_label, target_map=None):", "else: raise RuntimeError(\"Input image shape invalid!\") def give_simple_perturbation(self, attack_method, in_image): w, h, c", "self.saliency1, self.topK = self.run_model( self.sess, [self.NET.saliency, self.NET.top_idx], self.test_image, self.NET) self.saliency1_flatten = np.reshape( self.saliency1,", "the attack (refer to the original paper) num_steps: Number of steps in Integrated", "self.original_label = original_label self.mean_image = mean_image self.k_top = k_top self.num_steps = num_steps self.reference_image", "print(\"Pred: \", np.argmax(predicted_scores, 1)) print(\"Label: \", original_label) return True else: self.original_confidence = np.max(predicted_scores)", "if hasattr(self, \"use_target\") and self.use_target: return sess.run(operation, feed_dict={ NET.input: feed, NET.label_ph: self.original_label, self.topk_ph:", "self.test_image = test_image self.original_label = original_label self.mean_image = mean_image self.k_top = k_top w,", "tf.reduce_mean( target_dis) * self.beta_0_ph + self.beta_1_ph * tf.reduce_mean( output_dis) self.debug = target_loss self.target_direction", "1. 
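# Each iteration of the attack takes a signed step of size alpha, clips the cumulative
# perturbation into the allowed L-infinity ball, and clips the result back into the valid
# pixel range. A small numpy sketch of that step; names are illustrative and the class
# method's mean-image handling is omitted.
import numpy as np

def apply_perturb_step(original, current, direction, alpha, bound, pixel_min=0., pixel_max=1.):
    """One attack step: signed step, L_inf projection of the total perturbation,
    then clipping into the valid pixel range."""
    d = current + alpha * np.sign(direction) - original   # cumulative perturbation so far
    d = np.clip(d, -bound, bound)                         # project into the L_inf ball
    return np.clip(original + d, pixel_min, pixel_max)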
def iterative_attack(self, attack_method, epsilon, iters=100, alpha=1, beta_0=1e11, beta_1=1e6, measure=\"intersection\", target=None): \"\"\" Args:", "if len(test_image.shape) != 3: raise ValueError(\"Invalid Test Image Dimensions\") if NET.input.get_shape()[-3]!=test_image.shape[-3] or NET.input.get_shape()[-2]!=test_image.shape[-2]", "or \"intersection\") beta_0: parameter for manipulate (target) attack beta_1: parameter for manipulate (target)", "counterfactuals = self.create_counterfactuals(in_image) w, h, c = self.test_image.shape if attack_method == \"random\": perturbation", "test_image self.original_label = original_label assert self.check_prediction(self.sess, original_label, test_image, self.NET) == False if target_map", "= pixel_max if len(test_image.shape) != 3: raise ValueError(\"Invalid Test Image Dimensions\") if NET.input.get_shape()[-3]!=test_image.shape[-3]", "self.NET.top_idx, counterfactuals, self.NET) criterion = float(len(np.intersect1d(self.topK, top2))) / self.k_top elif measure == \"correlation\":", "if target_map is not None: self.target_map = target_map self.original_output = self.NET2.predict(test_image[None, :]) counterfactuals", "plt from pylab import rcParams import scipy import scipy.stats as stats from tensorflow.python.ops", "self.NET2) perturbation = np.reshape(perturbation, [self.num_steps, w, h, c]) perturbation_summed = np.mean(perturbation, 0) return", "correlation = scipy.stats.spearmanr( self.saliency1_flatten, np.reshape(self.saliency2, [w * h]))[0] intersection = float(len(np.intersect1d(self.topK, self.top2))) /", "that NET.saliency is the saliency map tensor and NET2.saliency_flatten is its flatten version.", "shape=[w * h], name='topk_ph') self.mass_center_ph = tf.placeholder(tf.float32, shape=[2], name='mass_center_ph') self.target_map_ph = tf.placeholder(tf.float32, shape=[w,", "self.original_label, self.topk_ph: self.original_topk, self.mass_center_ph: self.original_mass_center }) else: return sess.run(operation, feed_dict={ NET.input: [feed], NET.label_ph:", "}) else: return sess.run(operation, feed_dict={ NET.input: [feed], NET.label_ph: self.original_label, }) elif len(feed.shape) ==", "self.topk_ph: self.original_topk, self.mass_center_ph: self.original_mass_center, self.beta_0_ph: self.beta_0, self.beta_1_ph: self.beta_1, self.original_output_ph: self.original_output, self.target_map_ph: self.target_map })", "i in [1, 2, 3] ]): raise ValueError( \"Surrogate model's input dimensions is", "out_image = self.test_image + d * proj_ratio out_image = np.clip(out_image, -self.mean_image, self.pixel_max -", "self.run_model(self.sess, self.NET.mass_center, counterfactuals, self.NET).astype(int) self.original_mass_center = self.mass_center1 def check_prediction(self, sess, original_label, image, NET):", "attacking has no meaning.\"\"\" predicted_scores = sess.run( NET.output, feed_dict={NET.input: image if len(image.shape) ==", "reference_image is None else reference_image w, h, c = self.mean_image.shape self.topk_ph = tf.placeholder(tf.float32,", "= np.max(predicted_scores) counterfactuals = self.create_counterfactuals(self.perturbed_image) self.saliency2, self.top2, self.mass_center2= self.run_model\\ (self.sess, [self.NET.saliency, self.NET.top_idx, self.NET.mass_center],", "if len(feed.shape) == 3: if hasattr(self, \"original_topk\") and hasattr( self, \"original_mass_center\"): if hasattr(self,", "self.run_model(self.sess, self.NET.output, self.perturbed_image, self.NET) confidence = np.max(predicted_scores) self.saliency2, self.top2, 
self.mass_center2= self.run_model\\ (self.sess, [self.NET.saliency,", "0] = 103.939 MEAN_IMAGE[:, :, :, 1] = 116.779 MEAN_IMAGE[:, :, :, 2]", "num in range(4): with open( \"./ImagenetValidationSamples/imagenet_sample_{}.pkl\".format( num), \"rb\") as inputs: dic_temp = pkl.load(inputs)", "in [1, 2, 3] ]): raise ValueError( \"Model's input dimensions is not Compatible", "numpy as np import tensorflow as tf import random import _pickle as pkl", "= original_label assert self.check_prediction(self.sess, original_label, test_image, self.NET) == False if target_map is not", "== 4: if hasattr(self, \"original_topk\") and hasattr( self, \"original_mass_center\"): if hasattr(self, \"use_target\") and", "[image]}) if np.argmax(predicted_scores, 1) != original_label: print(\"Network's Prediction is Already Incorrect!\") print(\"Pred: \",", "success of the attack (one of \"correlation\", \"mass_center\" or \"intersection\") Returns: intersection: The", "test_image_pert = self.test_image.copy() self.original = self.test_image.copy() if attack_method == 'target': self.use_target = True", "= np.linalg.norm(self.mass_center1 - self.mass_center2.astype(int)) cos_distance = scipy.spatial.distance.cosine( self.saliency1_flatten, np.reshape(self.saliency2, [w * h])) return", ": {}\".format(counter)) pert = self.give_simple_perturbation(attack_method, test_image_pert) # print(pert.sum()) test_image_pert = self.apply_perturb(test_image_pert, pert, alpha,", "False self.beta_0 = beta_0 self.beta_1 = beta_1 min_criterion = 1. last_image = None", "= self.run_model(self.sess, self.NET2.mass_center_direction, counterfactuals, self.NET2) perturbation = np.reshape(perturbation, [self.num_steps, w, h, c]) elif", "self.use_target: return sess.run(operation, feed_dict={ NET.input: feed, NET.label_ph: self.original_label, NET.reference_image: self.reference_image, self.topk_ph: self.original_topk, self.mass_center_ph:", "== self.original_label: if measure == \"intersection\": top2 = self.run_model(self.sess, self.NET.top_idx, test_image_pert, self.NET) criterion", "attack_method == \"topK\": perturbation = self.run_model(self.sess, self.topK_direction, in_image, self.NET2) perturbation = np.reshape(perturbation, [w,", "is not None: target_dis = tf.keras.losses.MSE(self.target_map_ph, NET.saliency) output_dis = tf.keras.losses.MSE(self.original_output_ph, NET.output) target_loss =", "original and perturbed image center_dislocation: The L2 distance between saliency map mass centers", "= self.create_counterfactuals(test_image) self.saliency1, self.topK = self.run_model( self.sess, [self.NET.saliency, self.NET.top_idx], counterfactuals, self.NET) self.saliency1_flatten =", "manipulation attack self.beta_0_ph = tf.placeholder(tf.float32, name='beta_0') self.beta_1_ph = tf.placeholder(tf.float32, name='beta_1') self.create_attack_ops(NET2, test_image.shape[-3], test_image.shape[-2])", "update_new_image(self, test_image, original_label, target_map=None): w, h, c = test_image.shape self.test_image = test_image self.original_label", "sess.run(operation, feed_dict={ NET.input: [feed], NET.label_ph: self.original_label, NET.reference_image: self.reference_image, self.topk_ph: self.original_topk, self.mass_center_ph: self.original_mass_center, self.target_map_ph:", "self.NET.mass_center], counterfactuals, self.NET) correlation = scipy.stats.spearmanr( self.saliency1_flatten, np.reshape(self.saliency2, [w * h]))[0] intersection =", "perturbed image devided correlation: The rank correlation between saliency maps of original and", 
"self.original_label, self.target_map_ph: self.target_map }) elif len(feed.shape) == 4: if hasattr(self, \"original_topk\") and hasattr(", "top2 = self.run_model(self.sess, self.NET.top_idx, counterfactuals, self.NET) criterion = float(len(np.intersect1d(self.topK, top2))) / self.k_top elif", "NET.label_ph: self.original_label, }) elif len(feed.shape) == 4: if hasattr(self, \"original_topk\") and hasattr( self,", "NET.reference_image: self.reference_image, NET.label_ph: self.original_label, self.target_map_ph: self.target_map }) elif len(feed.shape) == 4: if hasattr(self,", "subtracted test image original_label: True label of the image NET: Original neural network.", ":]) _, num_class = self.original_output.shape self.original_output_ph = tf.placeholder( tf.float32, shape=[None, num_class], name='original_output_ph') #", "self.elements1[elem1] = 1 self.original_topk = self.elements1 self.mass_center1 = self.run_model(self.sess, self.NET.mass_center, counterfactuals, self.NET).astype(int) self.original_mass_center", "np.reshape(perturbation, [self.num_steps, w, h, c]) elif attack_method == \"target\": self.use_target = True if", "but with activations replaced by softplus function (necessary only when the activation function", "k_top) if sum([ NET2.input.get_shape()[-i] != test_image.shape[-i] for i in [1, 2, 3] ]):", "- self.mass_center_ph)**2) NET.mass_center_direction = -tf.gradients(mass_center_loss, NET.input)[0] if self.target_map is not None: target_dis =", "= tf.placeholder(tf.float32, shape=[w * h], name='topk_ph') self.mass_center_ph = tf.placeholder(tf.float32, shape=[2], name='mass_center_ph') self.target_map_ph =", "self.k_top elif measure == \"correlation\": saliency2_flatten = self.run_model(self.sess, self.NET.saliency_flatten, test_image_pert, self.NET) criterion =", "attack_method: One of \"mass_center\", \"topK\" or \"random\" epsilon: set of allowed maximum $ell_infty$", "pert - self.test_image d_norm = np.linalg.norm(d.flatten(), ord=ord) if d_norm > bound: proj_ratio =", "of perturbations, eg:8 iters: number of maximum allowed attack iterations alpha: perturbation size", "UniGradientsAttack(SmoothGradientsAttack): def __init__(self, sess, mean_image, test_image, original_label, NET, NET2=None, k_top=1000, num_steps=100, radii=4, reference_image=None,", "open( \"./ImagenetValidationSamples/imagenet_sample_{}.pkl\".format( num), \"rb\") as inputs: dic_temp = pkl.load(inputs) X[num * 20:num *", "h, c)) elif attack_method == \"topK\": perturbation = self.run_model(self.sess, self.topK_direction, in_image, self.NET2) perturbation", "import gen_nn_ops config_gpu = tf.ConfigProto() config_gpu.gpu_options.allow_growth = True MEAN_IMAGE = np.zeros((1, 227, 227,", "the perturbed image \"\"\" self.beta_0 = beta_0 self.beta_1 = beta_1 w, h, c", "\"use_target\") and self.use_target: return sess.run(operation, feed_dict={ NET.input: [feed], NET.label_ph: self.original_label, self.topk_ph: self.original_topk, self.mass_center_ph:", "np.arange(w)) NET.mass_center = tf.stack([ tf.reduce_sum(NET.saliency * x_mesh) / (w * h), tf.reduce_sum(NET.saliency *", "is Already Incorrect!\") return True else: self.original_confidence = np.max(predicted_scores) return False def update_new_image(self,", "float(len(np.intersect1d(self.topK, top2))) / self.k_top elif measure == \"correlation\": saliency2_flatten = self.run_model(self.sess, self.NET.saliency_flatten, test_image_pert,", "self.target_direction, counterfactuals, self.NET2) perturbation = np.reshape(perturbation, [self.num_steps, w, h, c]) 
perturbation_summed = np.sum(np.array([float(i+1)/self.num_steps*perturbation[i]\\", "sess.run( NET.output, feed_dict={NET.input: image if len(image.shape) == 4 else [image]}) if np.argmax(predicted_scores, 1)", "c)) elif attack_method == \"topK\": perturbation = self.run_model(self.sess, self.topK_direction, in_image, self.NET2) perturbation =", "* h])) return intersection, correlation, center_dislocation, confidence, perturb_size, cos_distance class SmoothGradientsAttack(object): def __init__(self,", "Test Image Dimensions\") if sum([ NET.input.get_shape()[-i] != test_image.shape[-i] for i in [1, 2,", "\"random\" epsilon: set of allowed maximum $ell_infty$ of perturbations, eg:[2,4] iters: number of", "= np.reshape(perturbation, [self.num_steps, w, h, c]) perturbation_summed = np.mean(perturbation, 0) return np.sign(perturbation_summed) def", "pixels in the original picture that are in the top K salient pixels", "beta_0 self.beta_1 = beta_1 min_criterion = 1. last_image = None for counter in", "inputs: dic_temp = pkl.load(inputs) X[num * 20:num * 20 + 20] = dic_temp[\"X\"]", "self.test_image.copy() min_criterion = 1. for counter in range(iters): # if counter % int(iters", "picture that are in the top K salient pixels of the perturbed image", "steps in Integrated Gradients Algorithm reference_image: not used pixel_max: maximum pixel value in", "= self.test_image + np.clip( # in_image + alpha * np.sign(pert) - self.test_image, -bound,", "self.create_extra_ops(NET2, test_image.shape[-3], test_image.shape[-2], k_top) if NET2.input.get_shape()[-3]!=test_image.shape[-3] or NET2.input.get_shape()[-2]!=test_image.shape[-2] or\\ NET2.input.get_shape()[-1]!=test_image.shape[-1]: raise ValueError( \"Surrogate", "w, h, c = test_image.shape self.test_image = test_image self.original_label = original_label assert self.check_prediction(self.sess,", "= self.run_model(self.sess, self.mass_center_direction, in_image, self.NET2) perturbation = np.reshape(perturbation, [w, h, c]) elif attack_method", "is its flatten version. 
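# The counterfactuals for the Integrated Gradients attack interpolate linearly between the
# reference image and the input, and the per-step attack directions are combined with
# weights (i+1)/num_steps before taking the sign. A hedged numpy sketch; `grad_fn` is a
# placeholder for the gradient evaluation done via `run_model` in this file.
import numpy as np

def integrated_gradients_direction(image, reference, grad_fn, num_steps):
    """Interpolate between `reference` and `image`, weight the per-step gradients
    by (i+1)/num_steps, and return the signed sum."""
    alphas = (np.arange(num_steps) + 1.) / num_steps
    counterfactuals = np.array([reference + a * (image - reference) for a in alphas])
    grads = grad_fn(counterfactuals)                               # [num_steps, *image.shape]
    weighted = np.sum(alphas.reshape(-1, 1, 1, 1) * grads, axis=0)
    return np.sign(weighted)

# quick check with a dummy gradient function on a 4x4x3 image
_img = np.random.rand(4, 4, 3)
_dir = integrated_gradients_direction(_img, np.zeros_like(_img), lambda b: b, num_steps=10)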
k_top: the topK parameter of the attack (refer to", "= tf.placeholder(tf.float32, name='beta_1') self.create_attack_ops(NET2, test_image.shape[-3], test_image.shape[-2]) self.update_new_image(test_image, original_label) def update_new_image(self, test_image, original_label, target_map=None):", "self.top2, self.mass_center2= self.run_model\\ (self.sess, [self.NET.saliency, self.NET.top_idx, self.NET.mass_center], self.perturbed_image, self.NET) correlation = scipy.stats.spearmanr( self.saliency1_flatten,", "self.create_counterfactuals(in_image) w, h, c = self.test_image.shape if attack_method == \"random\": perturbation = np.random.normal(size=(self.num_steps,", "in each iteration of the attack measure: measure for success of the attack", "self.target_map = target_map self.original_output = self.NET2.predict(test_image[None, :]) self.saliency1, self.topK = self.run_model( self.sess, [self.NET.saliency,", "name='beta_0') self.beta_1_ph = tf.placeholder(tf.float32, name='beta_1') self.create_attack_ops(NET2, test_image.shape[-3], test_image.shape[-2]) self.update_new_image(test_image, original_label) def update_new_image(self, test_image,", "self.run_model(self.sess, self.mass_center_direction, in_image, self.NET2) perturbation = np.reshape(perturbation, [w, h, c]) elif attack_method ==", "None: NET2 = NET else: self.create_extra_ops(NET2, test_image.shape[-3], test_image.shape[-2], k_top) if NET2.input.get_shape()[-3]!=test_image.shape[-3] or NET2.input.get_shape()[-2]!=test_image.shape[-2]", "self.target_map_ph: self.target_map }) else: return sess.run(operation, feed_dict={ NET.input: feed, NET.reference_image: self.reference_image, NET.label_ph: self.original_label,", "self.target_map }) else: return sess.run(operation, feed_dict={ NET.input: [feed], NET.label_ph: self.original_label, self.topk_ph: self.original_topk, self.mass_center_ph:", "None: raise ValueError(\"No target region determined!\") else: perturbation = self.run_model(self.sess, self.target_direction, in_image, self.NET2)", "or NET.input.get_shape()[-2]!=test_image.shape[-2] or\\ NET.input.get_shape()[-1]!=test_image.shape[-1]: raise ValueError( \"Model's input dimensions is not Compatible with", "1 self.original_topk = self.elements1 self.mass_center1 = self.run_model(self.sess, self.NET.mass_center, self.test_image, self.NET).astype(int) self.original_mass_center = self.mass_center1", "import rcParams import scipy import scipy.stats as stats from tensorflow.python.ops import gen_nn_ops config_gpu", "correlation: The rank correlation between saliency maps of original and perturbed image center_dislocation:", "self.check_prediction(self.sess, original_label, test_image, self.NET) == False if target_map is not None: self.target_map =", "elif measure == \"correlation\": saliency2_flatten = self.run_model(self.sess, self.NET.saliency_flatten, counterfactuals, self.NET) criterion = scipy.stats.spearmanr(self.saliency1_flatten,", "L2 distance between saliency map mass centers in original and perturbed images confidence:", "[self.num_steps, w, h, c]) elif attack_method == \"mass_center\": perturbation = self.run_model(self.sess, self.NET2.mass_center_direction, counterfactuals,", "self.original_topk, NET.reference_image: self.reference_image, self.mass_center_ph: self.original_mass_center, self.beta_0_ph: self.beta_0, self.beta_1_ph: self.beta_1, self.original_output_ph: self.original_output, self.target_map_ph: self.target_map", "\"intersection\": top2 = self.run_model(self.sess, self.NET.top_idx, counterfactuals, self.NET) 
criterion = float(len(np.intersect1d(self.topK, top2))) / self.k_top", "self, \"original_mass_center\"): if hasattr(self, \"use_target\") and self.use_target: return sess.run(operation, feed_dict={ NET.input: feed, NET.label_ph:", "is None else reference_image w, h, c = self.mean_image.shape self.topk_ph = tf.placeholder(tf.float32, shape=[w", "center2 = self.run_model(self.sess, self.NET.mass_center, counterfactuals, self.NET).astype(int) criterion = -np.linalg.norm(self.mass_center1 - center2) elif measure", "= sess.run( NET.output, feed_dict={NET.input: image if len(image.shape) == 4 else [image]}) if np.argmax(predicted_scores,", "self.run_model(self.sess, self.NET.saliency_flatten, test_image_pert, self.NET) criterion = scipy.stats.spearmanr(self.saliency1_flatten, saliency2_flatten)[0] elif measure == \"mass_center\": center2", "perturbation = self.run_model(self.sess, self.mass_center_direction, in_image, self.NET2) perturbation = np.reshape(perturbation, [w, h, c]) elif", "if sum([ NET2.input.get_shape()[-i] != test_image.shape[-i] for i in [1, 2, 3] ]): raise", "self.beta_0_ph = tf.placeholder(tf.float32, name='beta_0') self.beta_1_ph = tf.placeholder(tf.float32, name='beta_1') self.create_attack_ops(NET2, test_image.shape[-3], test_image.shape[-2]) self.update_new_image(test_image, original_label)", "SimpleGradientAttack(object): def __init__(self, mean_image, sess, test_image, original_label, NET, NET2=None, k_top=1000, target_map=None, pixel_max=255.): \"\"\"", "elif attack_method == \"mass_center\": perturbation = self.run_model(self.sess, self.mass_center_direction, in_image, self.NET2) perturbation = np.reshape(perturbation,", "\"correlation\", \"mass_center\" or \"intersection\") Returns: intersection: The portion of the top K salient", "pixel_max: maximum pixel value in the input image \"\"\" self.pixel_max = pixel_max if", "num_steps: Number of steps in Integrated Gradients Algorithm reference_image: Mean subtracted reference image", "20] = dic_temp[\"y\"] labels = dic_temp[\"labels\"] return X, y.astype(int), labels class SimpleGradientAttack(object): def", "NET2=NET2, k_top=1000, num_steps=num_steps, reference_image=reference_image, target_map=target_map, pixel_max=255.) def create_counterfactuals(self, in_image): counterfactuals = np.array([ in_image", "self.original_topk, self.mass_center_ph: self.original_mass_center }) else: return sess.run(operation, feed_dict={ NET.input: feed, NET.label_ph: self.original_label, })", "= self.run_model(self.sess, self.debug, in_image, self.NET2) print(\"MSE: \", debug) perturbation = np.reshape(perturbation, [w, h,", "NET.input: [feed], NET.label_ph: self.original_label, self.topk_ph: self.original_topk, self.mass_center_ph: self.original_mass_center, self.beta_0_ph: self.beta_0, self.beta_1_ph: self.beta_1, self.original_output_ph:", "== \"cosine\": saliency2_flatten = self.run_model(self.sess, self.NET.saliency_flatten, test_image_pert, self.NET) criterion = scipy.spatial.distance.cosine( self.saliency1_flatten, saliency2_flatten)", "len(test_image.shape) != 3: raise ValueError(\"Invalid Test Image Dimensions\") if sum([ NET.input.get_shape()[-i] != test_image.shape[-i]", "Mean subtracted reference image of Integrated Gradients Algorithm pixel_max: the maximum pixel value", "test_image_pert = self.test_image.copy() min_criterion = 1. 
for counter in range(iters): # if counter", "= test_image_pert.copy() perturb_size = np.max( np.abs(self.test_image - self.perturbed_image)) else: pass if criterion ==", "w, h, c = self.test_image.shape test_image_pert = self.test_image.copy() min_criterion = 1. for counter", "NET.input.get_shape()[-3]!=test_image.shape[-3] or NET.input.get_shape()[-2]!=test_image.shape[-2] or\\ NET.input.get_shape()[-1]!=test_image.shape[-1]: raise ValueError( \"Model's input dimensions is not Compatible", "test_image.shape[-2], k_top) if NET2 is None: NET2 = NET else: self.create_extra_ops(NET2, test_image.shape[-3], test_image.shape[-2],", "self.run_model(self.sess, self.NET.mass_center, test_image_pert, self.NET).astype(int) criterion = -np.linalg.norm(self.mass_center1 - center2) elif measure == \"cosine\":", "= sess self.target_map = target_map self.create_extra_ops(NET, test_image.shape[-3], test_image.shape[-2], k_top) if NET2 is None:", "NET, w, h): topK_loss = tf.reduce_sum((NET.saliency_flatten * self.topk_ph)) self.topK_direction = -tf.gradients(topK_loss, NET.input)[0] mass_center_loss", "3)).astype(np.float32) MEAN_IMAGE[:, :, :, 0] = 103.939 MEAN_IMAGE[:, :, :, 1] = 116.779", "name='topk_ph') self.mass_center_ph = tf.placeholder(tf.float32, shape=[2], name='mass_center_ph') self.target_map_ph = tf.placeholder(tf.float32, shape=[w, h], name='target_map_ph') self.original_output", "scipy.spatial.distance.cosine( self.saliency1_flatten, np.reshape(self.saliency2, [w * h])) return intersection, correlation, center_dislocation, confidence, perturb_size, cos_distance", "self.original_label, NET.reference_image: self.reference_image, self.topk_ph: self.original_topk, self.mass_center_ph: self.original_mass_center, self.target_map_ph: self.target_map }) else: return sess.run(operation,", "self.test_image, -bound, bound) d = in_image + alpha * pert - self.test_image d_norm", "with open( \"./ImagenetValidationSamples/imagenet_sample_{}.pkl\".format( num), \"rb\") as inputs: dic_temp = pkl.load(inputs) X[num * 20:num", "np.random.normal(scale=0.1 * (in_image.max() - in_image.min()), size=in_image.shape) for _ in range(self.num_steps) ]) return np.array(counterfactuals)", "alpha, bound=8 / 255, ord=np.inf): if self.mean_image is None: self.mean_image = np.zeros_like(in_image) #", "in the input image \"\"\" self.pixel_max = pixel_max if len(test_image.shape) != 3: raise", "self.target_direction = -tf.gradients(target_loss, NET.input)[0] def run_model(self, sess, operation, feed, NET): if len(feed.shape) ==", "mean_image self.k_top = k_top self.num_steps = num_steps self.reference_image = np.zeros_like( test_image) if reference_image", "correlation, center_dislocation, confidence, perturb_size, cos_distance class SmoothGradientsAttack(object): def __init__(self, sess, mean_image, test_image, original_label,", "np.array(counterfactuals) def run_model(self, sess, operation, feed, NET): if len(feed.shape) == 3: if hasattr(self,", "self.top2, self.mass_center2= self.run_model\\ (self.sess, [self.NET.saliency, self.NET.top_idx, self.NET.mass_center], counterfactuals, self.NET) correlation = scipy.stats.spearmanr( self.saliency1_flatten,", "h, c = self.test_image.shape if attack_method == \"random\": perturbation = np.random.normal(size=(w, h, c))", "- self.test_image, -bound, bound) d = in_image + alpha * pert - self.test_image", "operation, feed, NET): if len(feed.shape) == 3: if hasattr(self, \"original_topk\") and hasattr( self,", "feed, NET.label_ph: self.original_label, NET.reference_image: 
self.reference_image, self.topk_ph: self.original_topk, self.mass_center_ph: self.original_mass_center, self.target_map_ph: self.target_map }) else:", "= tf.placeholder( tf.float32, shape=[None, num_class], name='original_output_ph') # only for the manipulation attack self.create_attack_ops(self.NET2,", "NET2 = NET else: self.create_extra_ops(NET2, test_image.shape[-3], test_image.shape[-2], k_top) if NET2.input.get_shape()[-3]!=test_image.shape[-3] or NET2.input.get_shape()[-2]!=test_image.shape[-2] or\\", "for counter in range(iters): pert = self.give_simple_perturbation(attack_method, test_image_pert) test_image_pert = self.apply_perturb(test_image_pert, pert, alpha,", "in range(4): with open( \"./ImagenetValidationSamples/imagenet_sample_{}.pkl\".format( num), \"rb\") as inputs: dic_temp = pkl.load(inputs) X[num", "* test_image.shape[-2]]) elem1 = np.argsort(np.reshape(self.saliency1, [w * h]))[-self.k_top:] self.elements1 = np.zeros(w * h)", "feed_dict={ NET.input: feed, NET.label_ph: self.original_label, self.topk_ph: self.original_topk, self.mass_center_ph: self.original_mass_center }) else: return sess.run(operation,", "[w * h]))[0] intersection = float(len(np.intersect1d(self.topK, self.top2))) / self.k_top center_dislocation = np.linalg.norm(self.mass_center1 -", "hasattr( self, \"original_mass_center\"): if hasattr(self, \"use_target\") and self.use_target: return sess.run(operation, feed_dict={ NET.input: [feed],", "1., 1., self.original_confidence, 0. return None # print( # '''For maximum allowed perturbation", "self.perturbed_image, self.NET) confidence = np.max(predicted_scores) counterfactuals = self.create_counterfactuals(self.perturbed_image) self.saliency2, self.top2, self.mass_center2= self.run_model\\ (self.sess,", "= target_loss self.target_direction = -tf.gradients(target_loss, NET.input)[0] def create_counterfactuals(self, in_image, noise_ratio=0.1): counterfactuals = np.array([", "NET2 self.test_image = test_image self.original_label = original_label self.mean_image = mean_image self.k_top = k_top", "return sess.run(operation, feed_dict={ NET.input: [feed], NET.label_ph: self.original_label, NET.reference_image: self.reference_image, self.topk_ph: self.original_topk, self.mass_center_ph: self.original_mass_center,", "= topK_loss NET.topK_direction = -tf.gradients(topK_loss, NET.input)[0] mass_center_loss = -tf.reduce_sum( (NET.mass_center - self.mass_center_ph)**2) NET.mass_center_direction", "feed_dict={ NET.input: feed, NET.reference_image: self.reference_image, NET.label_ph: self.original_label, }) else: raise RuntimeError(\"Input image shape", "if attack_method == \"random\": perturbation = np.random.normal(size=(w, h, c)) elif attack_method == \"topK\":", "is changed\") pass if min_criterion == 1.: # print( # \"The attack was", "is that the images are mean subtracted) sess: Session containing model(and surrogate model's)", "x_mesh = np.meshgrid(np.arange(h), np.arange(w)) NET.mass_center = tf.stack([ tf.reduce_sum(NET.saliency * x_mesh) / (w *", "\"topK\" or \"random\" epsilon: set of allowed maximum $ell_infty$ of perturbations, eg:[2,4] iters:", "of \"mass_center\", \"topK\" or \"random\" epsilon: Allowed maximum $ell_infty$ of perturbations, eg:8 iters:", "if attack_method == 'target': self.use_target = True else: self.use_target = False self.beta_0 =", "= 103.939 MEAN_IMAGE[:, :, :, 1] = 116.779 MEAN_IMAGE[:, :, :, 2] =", "NET.reference_image: self.reference_image, NET.label_ph: self.original_label, }) else: raise RuntimeError(\"Input image shape 
invalid!\") def give_simple_perturbation(self,", "place, attacking has no meaning.\"\"\" predicted_scores = sess.run( NET.output, feed_dict={NET.input: image if len(image.shape)", "maximum pixel value in the image. \"\"\" self.pixel_max = pixel_max if len(test_image.shape) !=", "self.test_image, self.NET).astype(int) self.original_mass_center = self.mass_center1 def check_prediction(self, sess, original_label, image, NET): \"\"\" If", "True else: self.original_confidence = np.max(predicted_scores) return False def update_new_image(self, test_image, original_label, target_map=None): w,", "\"use_target\") and self.use_target: return sess.run(operation, feed_dict={ NET.input: [feed], NET.label_ph: self.original_label, self.topk_ph: self.original_topk, NET.reference_image:", "original_label, NET, NET2=None, k_top=1000, num_steps=100, radii=4, reference_image=None, target_map=None, pixel_max=255.): self.radii = radii /", "have second order gradients, ex: ReLU). It's assumed that NET.saliency is the saliency", "counter in range(iters): # if counter % int(iters / 5) == 0: #", "# print(pert.sum()) test_image_pert = self.apply_perturb(test_image_pert, pert, alpha, epsilon) criterion = self.check_measure(test_image_pert, measure) if", "w, h): topK_loss = tf.reduce_sum((NET.saliency_flatten * self.topk_ph)) self.topK_direction = -tf.gradients(topK_loss, NET.input)[0] mass_center_loss =", "[self.NET.saliency, self.NET.top_idx, self.NET.mass_center], self.perturbed_image, self.NET) correlation = scipy.stats.spearmanr( self.saliency1_flatten, np.reshape(self.saliency2, [w * h]))[0]", "order gradients, ex: ReLU). It's assumed that NET.saliency is the saliency map tensor", "target_map self.create_extra_ops(NET, test_image.shape[-3], test_image.shape[-2], k_top) if NET2 is None: NET2 = NET else:", "self.beta_1_ph: self.beta_1, self.original_output_ph: self.original_output, self.target_map_ph: self.target_map }) else: return sess.run(operation, feed_dict={ NET.input: feed,", "-bound, bound) d = in_image + alpha * np.sign(pert) - self.test_image d_norm =", "else: return 1. 
def iterative_attack(self, attack_method, epsilon, iters=100, alpha=1, beta_0=1e11, beta_1=1e6, measure=\"intersection\", target=None):", "h], name='topk_ph') self.mass_center_ph = tf.placeholder(tf.float32, shape=[2], name='mass_center_ph') self.target_map_ph = tf.placeholder(tf.float32, shape=[w, h], name='target_map_ph')", "self.original_topk, self.mass_center_ph: self.original_mass_center, self.target_map_ph: self.target_map }) else: return sess.run(operation, feed_dict={ NET.input: feed, NET.reference_image:", "changed\") pass if min_criterion == 1.: # print( # \"The attack was not", "= self.test_image.shape test_image_pert = self.test_image.copy() self.original = self.test_image.copy() if attack_method == 'target': self.use_target", "tf.reduce_sum(NET.saliency * x_mesh) / (w * h), tf.reduce_sum(NET.saliency * y_mesh) / (w *", "scipy.spatial.distance.cosine( self.saliency1_flatten, saliency2_flatten) else: raise ValueError(\"Invalid measure!\") return criterion else: return 1 def", "the first place, attacking has no meaning.\"\"\" predicted_scores = sess.run( NET.output, feed_dict={NET.input: image", "self.create_counterfactuals(test_image) self.saliency1, self.topK = self.run_model( self.sess, [self.NET.saliency, self.NET.top_idx], counterfactuals, self.NET) self.saliency1_flatten = np.reshape(", "$ell_infty$ of perturbations, eg:[2,4] iters: number of maximum allowed attack iterations alpha: perturbation", "if np.argmax(prob, 1) == self.original_label: counterfactuals = self.create_counterfactuals(test_image_pert) if measure == \"intersection\": top2", "= radii / (255. / pixel_max) super(UniGradientsAttack, self).__init__(sess, mean_image, test_image, original_label, NET, NET2=NET2,", "criterion = self.check_measure(test_image_pert, measure) if criterion < min_criterion: min_criterion = criterion self.perturbed_image =", "images are mean subtracted) sess: Session containing model(and surrogate model's) graphs test_image: Mean", "]) def create_attack_ops(self, NET, w, h): topK_loss = tf.reduce_sum((NET.saliency_flatten * self.topk_ph)) self.debug =", "(self.sess, [self.NET.saliency, self.NET.top_idx, self.NET.mass_center], self.perturbed_image, self.NET) correlation = scipy.stats.spearmanr( self.saliency1_flatten, np.reshape(self.saliency2, [w *", "227, 227, 3)) y = np.zeros(100) for num in range(4): with open( \"./ImagenetValidationSamples/imagenet_sample_{}.pkl\".format(", "else: return sess.run(operation, feed_dict={ NET.input: feed, NET.label_ph: self.original_label, }) else: raise RuntimeError(\"Input image", "self.original_output_ph: self.original_output, self.target_map_ph: self.target_map }) else: return sess.run(operation, feed_dict={ NET.input: feed, NET.label_ph: self.original_label,", "not have second order gradients, ex: ReLU). 
It's assumed that NET.saliency is the", "or \"intersection\") Returns: intersection: The portion of the top K salient pixels in", "import tensorflow as tf import random import _pickle as pkl import matplotlib.pyplot as", "self.original_label, }) elif len(feed.shape) == 4: if hasattr(self, \"original_topk\") and hasattr( self, \"original_mass_center\"):", "+ alpha * np.sign(pert) - self.test_image d_norm = np.linalg.norm(d.flatten(), ord=ord) if d_norm >", "= tf.stack([ tf.reduce_sum(NET.saliency * x_mesh) / (w * h), tf.reduce_sum(NET.saliency * y_mesh) /", "/ self.k_top center_dislocation = np.linalg.norm(self.mass_center1 - self.mass_center2.astype(int)) cos_distance = scipy.spatial.distance.cosine( self.saliency1_flatten, np.reshape(self.saliency2, [w", "k_top w, h, c = self.mean_image.shape self.topk_ph = tf.placeholder(tf.float32, shape=[w * h], name='topk_ph')", "Prediction is Already Incorrect!\") print(\"Pred: \", np.argmax(predicted_scores, 1)) print(\"Label: \", original_label) return True", "in_image): counterfactuals = self.create_counterfactuals(in_image) w, h, c = self.test_image.shape if attack_method == \"random\":", "image of Integrated Gradients Algorithm pixel_max: the maximum pixel value in the image.", "w, h, c = self.test_image.shape if attack_method == \"random\": perturbation = np.random.normal(size=(self.num_steps, w,", "def update_new_image(self, test_image, original_label, target_map=None): w, h, c = test_image.shape self.test_image = test_image", "[self.NET.saliency, self.NET.top_idx], self.test_image, self.NET) self.saliency1_flatten = np.reshape( self.saliency1, [test_image.shape[-3] * test_image.shape[-2]]) elem1 =", "self.beta_0 = beta_0 self.beta_1 = beta_1 min_criterion = 1. last_image = None for", "tf.placeholder(tf.float32, name='beta_0') self.beta_1_ph = tf.placeholder(tf.float32, name='beta_1') self.create_attack_ops(NET2, test_image.shape[-3], test_image.shape[-2]) self.update_new_image(test_image, original_label) def update_new_image(self,", "NET2=None, k_top=1000, num_steps=100, radii=4, reference_image=None, target_map=None, pixel_max=255.): self.radii = radii / (255. 
/", "h, c = self.test_image.shape if attack_method == \"random\": perturbation = np.random.normal(size=(self.num_steps, w, h,", "feed_dict={ NET.input: [feed], NET.label_ph: self.original_label, self.topk_ph: self.original_topk, NET.reference_image: self.reference_image, self.mass_center_ph: self.original_mass_center, self.beta_0_ph: self.beta_0,", "* pert - self.test_image d_norm = np.linalg.norm(d.flatten(), ord=ord) if d_norm > bound: proj_ratio", "20] = dic_temp[\"X\"] y[num * 20:num * 20 + 20] = dic_temp[\"y\"] labels", "print(\"Network's Prediction is Already Incorrect!\") return True else: self.original_confidence = np.max(predicted_scores) return False", "shape=[2], name='mass_center_ph') self.target_map_ph = tf.placeholder(tf.float32, shape=[w, h], name='target_map_ph') self.beta_0_ph = tf.placeholder(tf.float32, name='beta_0') self.beta_1_ph", "= dic_temp[\"labels\"] return X, y.astype(int), labels class SimpleGradientAttack(object): def __init__(self, mean_image, sess, test_image,", "self.mass_center_direction, in_image, self.NET2) perturbation = np.reshape(perturbation, [w, h, c]) elif attack_method == \"target\":", "self.NET.mass_center, counterfactuals, self.NET).astype(int) self.original_mass_center = self.mass_center1 def create_extra_ops(self, NET, w, h, k_top): top_val,", "in the first place, attacking has no meaning.\"\"\" predicted_scores = sess.run( NET.output, feed_dict={NET.input:", "if np.argmax(predicted_scores, 1) != original_label: print(\"Network's Prediction is Already Incorrect!\") print(\"Pred: \", np.argmax(predicted_scores,", "original_label, NET, NET2=None, k_top=1000, target_map=None, pixel_max=255.): \"\"\" Args: mean_image: The mean image of", "k_top=1000, num_steps=100, reference_image=None, target_map=None, pixel_max=255.): \"\"\" Args: mean_image: The mean image of the", "self.original_output, self.target_map_ph: self.target_map }) else: return sess.run(operation, feed_dict={ NET.input: [feed], NET.label_ph: self.original_label, NET.reference_image:", "test_image_pert, self.NET) if np.argmax(prob, 1) == self.original_label: if measure == \"intersection\": top2 =", "= self.apply_perturb(test_image_pert, pert, alpha, epsilon) criterion = self.check_measure(test_image_pert, measure) if criterion < min_criterion:", "parameter for manipulate (target) attack Returns: intersection: The portion of the top K", "None: target_dis = tf.keras.losses.MSE(self.target_map_ph, NET.saliency) output_dis = tf.keras.losses.MSE(self.original_output_ph, NET.output) target_loss = tf.reduce_mean( target_dis)", "beta_0=1e11, beta_1=1e6, measure=\"intersection\", target=None): \"\"\" Args: attack_method: One of \"mass_center\", \"topK\" or \"random\"", "assumption is that the images are mean subtracted) sess: Session containing model(and surrogate", "else reference_image w, h, c = self.mean_image.shape self.topk_ph = tf.placeholder(tf.float32, shape=[w * h],", "None: NET2 = NET else: self.create_extra_ops(NET2, test_image.shape[-3], test_image.shape[-2], k_top) if sum([ NET2.input.get_shape()[-i] !=", "allowed attack iterations alpha: perturbation size in each iteration of the attack measure:", "sess.run(operation, feed_dict={ NET.input: [feed], NET.label_ph: self.original_label, self.topk_ph: self.original_topk, self.mass_center_ph: self.original_mass_center, self.target_map_ph: self.target_map })", "self.mass_center2= self.run_model\\ (self.sess, [self.NET.saliency, self.NET.top_idx, self.NET.mass_center], self.perturbed_image, self.NET) correlation = scipy.stats.spearmanr( 
self.saliency1_flatten, np.reshape(self.saliency2,", "self.NET2) perturbation = np.reshape(perturbation, [self.num_steps, w, h, c]) elif attack_method == \"target\": self.use_target", "np.zeros_like( test_image) if reference_image is None else reference_image w, h, c = self.mean_image.shape", "only for the manipulation attack self.beta_0_ph = tf.placeholder(tf.float32, name='beta_0') self.beta_1_ph = tf.placeholder(tf.float32, name='beta_1')", "== \"correlation\": saliency2_flatten = self.run_model(self.sess, self.NET.saliency_flatten, test_image_pert, self.NET) criterion = scipy.stats.spearmanr(self.saliency1_flatten, saliency2_flatten)[0] elif", "in range(self.num_steps)]) return np.array(counterfactuals) def run_model(self, sess, operation, feed, NET): if len(feed.shape) ==", "original picture that are in the top K salient pixels of the perturbed", "np.meshgrid(np.arange(h), np.arange(w)) NET.mass_center = tf.stack([ tf.reduce_sum(NET.saliency * x_mesh) / (w * h), tf.reduce_sum(NET.saliency", "NET.output) target_loss = tf.reduce_mean( target_dis) * self.beta_0_ph + self.beta_1_ph * tf.reduce_mean( output_dis) self.debug", "= num_steps self.reference_image = np.zeros_like( test_image) if reference_image is None else reference_image w,", "* proj_ratio out_image = np.clip(out_image, -self.mean_image, self.pixel_max - self.mean_image) return out_image def check_measure(self,", "NET.output, feed_dict={NET.input: image if len(image.shape) == 4 else [image]}) if np.argmax(predicted_scores, 1) !=", "name='original_output_ph') # only for the manipulation attack self.create_attack_ops(self.NET2, test_image.shape[-3], test_image.shape[-2]) self.update_new_image(test_image, original_label) def", "mean_image, sess, test_image, original_label, NET, NET2=None, k_top=1000, target_map=None, pixel_max=255.): \"\"\" Args: mean_image: The", "\"./ImagenetValidationSamples/imagenet_sample_{}.pkl\".format( num), \"rb\") as inputs: dic_temp = pkl.load(inputs) X[num * 20:num * 20", "cos_distance class IntegratedGradientsAttack(object): def __init__(self, sess, mean_image, test_image, original_label, NET, NET2=None, k_top=1000, num_steps=100,", "= scipy.spatial.distance.cosine( self.saliency1_flatten, saliency2_flatten) else: raise ValueError(\"Invalid measure!\") return criterion else: return 1", "parameter for manipulate (target) attack beta_1: parameter for manipulate (target) attack Returns: intersection:", "feed_dict={ NET.input: feed, NET.label_ph: self.original_label, NET.reference_image: self.reference_image, self.topk_ph: self.original_topk, self.mass_center_ph: self.original_mass_center, self.target_map_ph: self.target_map", "Number of steps in Integrated Gradients Algorithm reference_image: Mean subtracted reference image of", "c = self.test_image.shape if attack_method == \"random\": perturbation = np.random.normal(size=(w, h, c)) elif", "\"\"\" self.beta_0 = beta_0 self.beta_1 = beta_1 w, h, c = self.test_image.shape test_image_pert", "def create_extra_ops(self, NET, w, h, k_top): top_val, NET.top_idx = tf.nn.top_k(NET.saliency_flatten, k_top) y_mesh, x_mesh", "the orignal network but with activations replaced by softplus function (necessary only when", "size=in_image.shape) for _ in range(self.num_steps) ]) return np.array(counterfactuals) def run_model(self, sess, operation, feed,", "= -tf.gradients(mass_center_loss, NET.input)[0] if self.target_map is not None: target_dis = tf.keras.losses.MSE(self.target_map_ph, NET.saliency) output_dis", "perturbation = np.reshape(perturbation, [self.num_steps, 
w, h, c]) perturbation_summed = np.mean(perturbation, 0) return np.sign(perturbation_summed)", "confidence = np.max(predicted_scores) counterfactuals = self.create_counterfactuals(self.perturbed_image) self.saliency2, self.top2, self.mass_center2= self.run_model\\ (self.sess, [self.NET.saliency, self.NET.top_idx,", "w, h, c)) elif attack_method == \"topK\": perturbation = self.run_model(self.sess, self.NET2.topK_direction, counterfactuals, self.NET2)", "h), tf.reduce_sum(NET.saliency * y_mesh) / (w * h) ]) def create_attack_ops(self, NET, w,", "= float(len(np.intersect1d(self.topK, self.top2))) / self.k_top center_dislocation = np.linalg.norm(self.mass_center1 - self.mass_center2.astype(int)) cos_distance = scipy.spatial.distance.cosine(", "== \"topK\": perturbation = self.run_model(self.sess, self.NET2.topK_direction, counterfactuals, self.NET2) perturbation = np.reshape(perturbation, [self.num_steps, w,", "== \"mass_center\": perturbation = self.run_model(self.sess, self.NET2.mass_center_direction, counterfactuals, self.NET2) perturbation = np.reshape(perturbation, [self.num_steps, w,", "self.topK = self.run_model( self.sess, [self.NET.saliency, self.NET.top_idx], self.test_image, self.NET) self.saliency1_flatten = np.reshape( self.saliency1, [test_image.shape[-3]", "image shape invalid!\") def give_simple_perturbation(self, attack_method, in_image): w, h, c = self.test_image.shape if", "k_top=1000, target_map=None, pixel_max=255.): \"\"\" Args: mean_image: The mean image of the data set(The", "= np.max(predicted_scores) return False def update_new_image(self, test_image, original_label, target_map=None): w, h, c =", "sess.run(operation, feed_dict={ NET.input: [feed], NET.reference_image: self.reference_image, NET.label_ph: self.original_label, self.target_map_ph: self.target_map }) elif len(feed.shape)", "def iterative_attack(self, attack_method, epsilon, iters=100, alpha=1, beta_0=1e11, beta_1=1e6, measure=\"intersection\"): \"\"\" Args: attack_method: One", "NET.label_ph: self.original_label, self.topk_ph: self.original_topk, self.mass_center_ph: self.original_mass_center, self.beta_0_ph: self.beta_0, self.beta_1_ph: self.beta_1, self.original_output_ph: self.original_output, self.target_map_ph:", "test_image.shape[-3], test_image.shape[-2], k_top) if NET2 is None: NET2 = NET else: self.create_extra_ops(NET2, test_image.shape[-3],", "\"original_topk\") and hasattr( self, \"original_mass_center\"): if hasattr(self, \"use_target\") and self.use_target: return sess.run(operation, feed_dict={", "self.NET.top_idx, test_image_pert, self.NET) criterion = float(len(np.intersect1d(self.topK, top2))) / self.k_top elif measure == \"correlation\":", "self.NET2.topK_direction, counterfactuals, self.NET2) perturbation = np.reshape(perturbation, [self.num_steps, w, h, c]) elif attack_method ==", "__init__(self, sess, mean_image, test_image, original_label, NET, NET2=None, k_top=1000, num_steps=100, radii=4, reference_image=None, target_map=None, pixel_max=255.):", "/ (255. / pixel_max) super(UniGradientsAttack, self).__init__(sess, mean_image, test_image, original_label, NET, NET2=NET2, k_top=1000, num_steps=num_steps,", "-MEAN_IMAGE MAX_INPUT = 255 * np.ones_like(MEAN_IMAGE).astype(np.float32) - MEAN_IMAGE def dataReader(): X = np.zeros((100,", "0) return np.sign(perturbation_summed) def apply_perturb(self, in_image, pert, alpha, bound=8 / 255, ord=np.inf): if", "saliency map tensor and NET.saliency_flatten is its flatten version. 
NET2: Surrogate neural network", "perturbation = self.run_model(self.sess, self.target_direction, counterfactuals, self.NET2) perturbation = np.reshape(perturbation, [self.num_steps, w, h, c])", "-tf.gradients(target_loss, NET.input)[0] def create_counterfactuals(self, in_image, noise_ratio=0.1): counterfactuals = np.array([ in_image + np.random.normal(scale=0.1 *", "self.k_top = k_top w, h, c = self.mean_image.shape self.topk_ph = tf.placeholder(tf.float32, shape=[w *", "feed_dict={ NET.input: feed, NET.label_ph: self.original_label, NET.reference_image: self.reference_image, self.topk_ph: self.original_topk, self.mass_center_ph: self.original_mass_center, self.beta_0_ph: self.beta_0,", "provided test image!\" ) self.NET = NET self.NET2 = NET2 self.test_image = test_image", "c = self.test_image.shape test_image_pert = self.test_image.copy() min_criterion = 1. perturb_size = 0. last_image", "else: return sess.run(operation, feed_dict={ NET.input: [feed], NET.label_ph: self.original_label, self.topk_ph: self.original_topk, self.mass_center_ph: self.original_mass_center, self.target_map_ph:", "h, c]) elif attack_method == \"target\": if self.target_map is None: raise ValueError(\"No target", "epsilon) criterion = self.check_measure(test_image_pert, measure) if criterion < min_criterion: min_criterion = criterion self.perturbed_image", "Surrogate neural network with the same structure and weights of the orignal network", "NET.mass_center = tf.stack([ tf.reduce_sum(NET.saliency * x_mesh) / (w * h), tf.reduce_sum(NET.saliency * y_mesh)", "return sess.run(operation, feed_dict={ NET.input: feed, NET.label_ph: self.original_label, self.topk_ph: self.original_topk, self.mass_center_ph: self.original_mass_center, self.beta_0_ph: self.beta_0,", "of maximum allowed attack iterations alpha: perturbation size in each iteration of the", "= target_loss self.target_direction = -tf.gradients(target_loss, NET.input)[0] def create_counterfactuals(self, in_image): ref_subtracted = in_image -", "tf.ConfigProto() config_gpu.gpu_options.allow_growth = True MEAN_IMAGE = np.zeros((1, 227, 227, 3)).astype(np.float32) MEAN_IMAGE[:, :, :,", "self.give_simple_perturbation(attack_method, test_image_pert) # print(pert.sum()) test_image_pert = self.apply_perturb(test_image_pert, pert, alpha, epsilon) criterion = self.check_measure(test_image_pert,", "correlation, center_dislocation, confidence, perturb_size, cos_distance class UniGradientsAttack(SmoothGradientsAttack): def __init__(self, sess, mean_image, test_image, original_label,", "run_model(self, sess, operation, feed, NET): if len(feed.shape) == 3: if hasattr(self, \"original_topk\") and", "range(self.num_steps)]) return np.array(counterfactuals) def run_model(self, sess, operation, feed, NET): if len(feed.shape) == 3:", "correlation, center_dislocation, confidence, perturb_size, cos_distance class IntegratedGradientsAttack(object): def __init__(self, sess, mean_image, test_image, original_label,", "NET.label_ph: self.original_label, self.topk_ph: self.original_topk, NET.reference_image: self.reference_image, self.mass_center_ph: self.original_mass_center, self.beta_0_ph: self.beta_0, self.beta_1_ph: self.beta_1, self.original_output_ph:", "= np.random.normal(size=(w, h, c)) elif attack_method == \"topK\": perturbation = self.run_model(self.sess, self.topK_direction, in_image,", "intersection, correlation, center_dislocation, confidence, perturb_size, cos_distance class IntegratedGradientsAttack(object): def __init__(self, sess, mean_image, test_image,", "output_dis 
= tf.keras.losses.MSE(self.original_output_ph, NET.output) target_loss = tf.reduce_mean( target_dis) * self.beta_0_ph + self.beta_1_ph *", "number of maximum allowed attack iterations alpha: perturbation size in each iteration of", "self.topk_ph)) self.topK_direction = -tf.gradients(topK_loss, NET.input)[0] mass_center_loss = -tf.reduce_sum( (NET.mass_center - self.mass_center_ph)**2) self.mass_center_direction =", "in_image, noise_ratio=0.1): counterfactuals = np.array([ in_image + np.random.normal(scale=0.1 * (in_image.max() - in_image.min()), size=in_image.shape)", "the image. \"\"\" self.pixel_max = pixel_max if len(test_image.shape) != 3: raise ValueError(\"Invalid Test", "= np.zeros((100, 227, 227, 3)) y = np.zeros(100) for num in range(4): with", "tf.placeholder(tf.float32, shape=[2], name='mass_center_ph') self.target_map_ph = tf.placeholder(tf.float32, shape=[w, h], name='target_map_ph') self.original_output = self.NET.predict(test_image[None, :])", "the top K salient pixels in the original picture that are in the", "self.test_image.shape if attack_method == \"random\": perturbation = np.random.normal(size=(self.num_steps, w, h, c)) elif attack_method", "print(pert.sum()) test_image_pert = self.apply_perturb(test_image_pert, pert, alpha, epsilon) criterion = self.check_measure(test_image_pert, measure) if criterion", "NET.input)[0] def run_model(self, sess, operation, feed, NET): if len(feed.shape) == 3: if hasattr(self,", "NET.input: feed, NET.label_ph: self.original_label, NET.reference_image: self.reference_image, self.topk_ph: self.original_topk, self.mass_center_ph: self.original_mass_center, self.target_map_ph: self.target_map })", "self.mean_image is None: self.mean_image = np.zeros_like(in_image) # out_image = self.test_image + np.clip( #", "assumed that NET.saliency is the saliency map tensor and NET2.saliency_flatten is its flatten", "self.NET.mass_center, self.test_image, self.NET).astype(int) self.original_mass_center = self.mass_center1 def check_prediction(self, sess, original_label, image, NET): \"\"\"", "* h), tf.reduce_sum(NET.saliency * y_mesh) / (w * h) ]) def create_attack_ops(self, NET,", "\"target\": self.use_target = True if self.target_map is None: raise ValueError(\"No target region determined!\")", "name='beta_0') self.beta_1_ph = tf.placeholder(tf.float32, name='beta_1') self.original_output = self.NET.predict(test_image[None, :]) _, num_class = self.original_output.shape", "self.update_new_image(test_image, original_label) def update_new_image(self, test_image, original_label, target_map=None): w, h, c = test_image.shape self.test_image", "attack measure: measure for success of the attack (one of \"correlation\", \"mass_center\" or", "= np.array([ in_image + np.random.uniform(-1, 1, size=in_image.shape) * self.radii for _ in range(self.num_steps)", "cos_distance = scipy.spatial.distance.cosine( self.saliency1_flatten, np.reshape(self.saliency2, [w * h])) return intersection, correlation, center_dislocation, confidence,", "w, h, c = self.test_image.shape test_image_pert = self.test_image.copy() min_criterion = 1. 
perturb_size =", "in_image, self.NET2) perturbation = np.reshape(perturbation, [w, h, c]) elif attack_method == \"mass_center\": perturbation", "measure == \"mass_center\": center2 = self.run_model(self.sess, self.NET.mass_center, test_image_pert, self.NET).astype(int) criterion = -np.linalg.norm(self.mass_center1 -", "image center_dislocation: The L2 distance between saliency map mass centers in original and", "alpha, epsilon) criterion = self.check_measure(test_image_pert, measure) if criterion < min_criterion: min_criterion = criterion", "self.sess, [self.NET.saliency, self.NET.top_idx], self.test_image, self.NET) self.saliency1_flatten = np.reshape( self.saliency1, [test_image.shape[-3] * test_image.shape[-2]]) elem1", "One of \"mass_center\", \"topK\" or \"random\" epsilon: Allowed maximum $ell_infty$ of perturbations, eg:8", "self.reference_image, self.mass_center_ph: self.original_mass_center, self.beta_0_ph: self.beta_0, self.beta_1_ph: self.beta_1, self.original_output_ph: self.original_output, self.target_map_ph: self.target_map }) else:", "= np.mean(perturbation, 0) return np.sign(perturbation_summed) def apply_perturb(self, in_image, pert, alpha, bound=8 / 255,", "-tf.reduce_sum( (NET.mass_center - self.mass_center_ph)**2) NET.mass_center_direction = -tf.gradients(mass_center_loss, NET.input)[0] if self.target_map is not None:", "value in the image. \"\"\" self.pixel_max = pixel_max if len(test_image.shape) != 3: raise", "Incorrect!\") print(\"Pred: \", np.argmax(predicted_scores, 1)) print(\"Label: \", original_label) return True else: self.original_confidence =", "attack (one of \"correlation\", \"mass_center\" or \"intersection\") Returns: intersection: The portion of the", "self.beta_1_ph * tf.reduce_mean( output_dis) self.debug = target_loss self.target_direction = -tf.gradients(target_loss, NET.input)[0] def create_counterfactuals(self,", "= self.run_model(self.sess, self.NET.output, self.perturbed_image, self.NET) confidence = np.max(predicted_scores) self.saliency2, self.top2, self.mass_center2= self.run_model\\ (self.sess,", "attack self.beta_0_ph = tf.placeholder(tf.float32, name='beta_0') self.beta_1_ph = tf.placeholder(tf.float32, name='beta_1') self.create_attack_ops(NET2, test_image.shape[-3], test_image.shape[-2]) self.update_new_image(test_image,", "shape invalid!\") def give_simple_perturbation(self, attack_method, in_image): counterfactuals = self.create_counterfactuals(in_image) w, h, c =", "* self.topk_ph)) self.debug = topK_loss NET.topK_direction = -tf.gradients(topK_loss, NET.input)[0] mass_center_loss = -tf.reduce_sum( (NET.mass_center", "the original paper) num_steps: Number of steps in Integrated Gradients Algorithm reference_image: Mean", "= -tf.reduce_sum( (NET.mass_center - self.mass_center_ph)**2) NET.mass_center_direction = -tf.gradients(mass_center_loss, NET.input)[0] if self.target_map is not", "self.test_image.shape test_image_pert = self.test_image.copy() min_criterion = 1. perturb_size = 0. 
last_image = None", "self.mean_image = np.zeros_like(in_image) # out_image = self.test_image + np.clip( # in_image + alpha", "labels class SimpleGradientAttack(object): def __init__(self, mean_image, sess, test_image, original_label, NET, NET2=None, k_top=1000, target_map=None,", "attack_method == \"random\": perturbation = np.random.normal(size=(self.num_steps, w, h, c)) elif attack_method == \"topK\":", "perturbed image \"\"\" w, h, c = self.test_image.shape test_image_pert = self.test_image.copy() self.original =", "def run_model(self, sess, operation, feed, NET): if len(feed.shape) == 3: if hasattr(self, \"original_topk\")", "and NET.saliency_flatten is its flatten version. NET2: Surrogate neural network with the same", "else [image]}) if np.argmax(predicted_scores, 1) != original_label: print(\"Network's Prediction is Already Incorrect!\") return", "mean_image self.k_top = k_top w, h, c = self.mean_image.shape self.topk_ph = tf.placeholder(tf.float32, shape=[w", "pixel_max=255.): \"\"\" Args: mean_image: The mean image of the data set(The assumption is", "ValueError(\"Invalid measure!\") return criterion else: return 1 def iterative_attack(self, attack_method, epsilon, iters=100, alpha=1,", "NET.saliency is the saliency map tensor and NET.saliency_flatten is its flatten version. NET2:", "self.original_topk, self.mass_center_ph: self.original_mass_center, self.target_map_ph: self.target_map }) else: return sess.run(operation, feed_dict={ NET.input: [feed], NET.label_ph:", "20 + 20] = dic_temp[\"X\"] y[num * 20:num * 20 + 20] =", "\"random\": perturbation = np.random.normal(size=(self.num_steps, w, h, c)) elif attack_method == \"topK\": perturbation =", "raise ValueError(\"Invalid Test Image Dimensions\") if sum([ NET.input.get_shape()[-i] != test_image.shape[-i] for i in", "class IntegratedGradientsAttack(object): def __init__(self, sess, mean_image, test_image, original_label, NET, NET2=None, k_top=1000, num_steps=100, reference_image=None,", "if target_map is not None: self.target_map = target_map self.original_output = self.NET2.predict(test_image[None, :]) self.saliency1,", "test_image: Mean subtracted test image original_label: True label of the image NET: Original", "image shape invalid!\") def give_simple_perturbation(self, attack_method, in_image): counterfactuals = self.create_counterfactuals(in_image) w, h, c", "confidence, perturb_size, cos_distance class UniGradientsAttack(SmoothGradientsAttack): def __init__(self, sess, mean_image, test_image, original_label, NET, NET2=None,", "Already Incorrect!\") print(\"Pred: \", np.argmax(predicted_scores, 1)) print(\"Label: \", original_label) return True else: self.original_confidence", "the topK parameter of the attack (refer to the original paper) num_steps: Number", "elif measure == \"correlation\": saliency2_flatten = self.run_model(self.sess, self.NET.saliency_flatten, test_image_pert, self.NET) criterion = scipy.stats.spearmanr(self.saliency1_flatten,", "self.mass_center_ph: self.original_mass_center, self.target_map_ph: self.target_map }) else: return sess.run(operation, feed_dict={ NET.input: [feed], NET.label_ph: self.original_label,", "1) != original_label: print(\"Network's Prediction is Already Incorrect!\") return True else: self.original_confidence =", "self.target_map is None: raise ValueError(\"No target region determined!\") else: perturbation = self.run_model(self.sess, self.target_direction,", "self.NET) criterion = scipy.spatial.distance.cosine( self.saliency1_flatten, saliency2_flatten) else: raise 
ValueError(\"Invalid measure!\") return criterion else:", "\"rb\") as inputs: dic_temp = pkl.load(inputs) X[num * 20:num * 20 + 20]", "0: # print(\"Iteration : {}\".format(counter)) pert = self.give_simple_perturbation(attack_method, test_image_pert) # print(pert.sum()) test_image_pert =", "the provided test image!\" ) if self.check_prediction(sess, original_label, test_image, NET): return self.sess =", "perturbation = self.run_model(self.sess, self.NET2.mass_center_direction, counterfactuals, self.NET2) perturbation = np.reshape(perturbation, [self.num_steps, w, h, c])", "shape invalid!\") def give_simple_perturbation(self, attack_method, in_image): w, h, c = self.test_image.shape if attack_method", "test_image_pert = self.apply_perturb(test_image_pert, pert, alpha, epsilon) criterion = self.check_measure(test_image_pert, measure) if criterion <", "NET2 = NET else: self.create_extra_ops(NET2, test_image.shape[-3], test_image.shape[-2], k_top) if sum([ NET2.input.get_shape()[-i] != test_image.shape[-i]", "and self.use_target: return sess.run(operation, feed_dict={ NET.input: [feed], NET.label_ph: self.original_label, self.topk_ph: self.original_topk, self.mass_center_ph: self.original_mass_center,", "image original_label: True label of the image NET: Original neural network. It's assumed", "NET.saliency_flatten is its flatten version. NET2: Surrogate neural network with the same structure", "raise RuntimeError(\"Input image shape invalid!\") def give_simple_perturbation(self, attack_method, in_image): w, h, c =", "proj_ratio = bound / np.linalg.norm(d.flatten(), ord=ord) else: proj_ratio = 1 out_image = self.test_image", "elif attack_method == \"target\": self.use_target = True if self.target_map is None: raise ValueError(\"No", "measure == \"correlation\": saliency2_flatten = self.run_model(self.sess, self.NET.saliency_flatten, test_image_pert, self.NET) criterion = scipy.stats.spearmanr(self.saliency1_flatten, saliency2_flatten)[0]", "255 * np.ones_like(MEAN_IMAGE).astype(np.float32) - MEAN_IMAGE def dataReader(): X = np.zeros((100, 227, 227, 3))", "data set(The assumption is that the images are mean subtracted) sess: Session containing", "Integrated Gradients Algorithm pixel_max: the maximum pixel value in the image. \"\"\" self.pixel_max", "self.topK_direction, in_image, self.NET2) perturbation = np.reshape(perturbation, [w, h, c]) elif attack_method == \"mass_center\":", "measure for success of the attack (one of \"correlation\", \"mass_center\" or \"intersection\") Returns:", "and NET2.saliency_flatten is its flatten version. 
k_top: the topK parameter of the attack", "feed_dict={ NET.input: [feed], NET.label_ph: self.original_label, self.topk_ph: self.original_topk, self.mass_center_ph: self.original_mass_center, self.target_map_ph: self.target_map }) else:", "the manipulation attack self.beta_0_ph = tf.placeholder(tf.float32, name='beta_0') self.beta_1_ph = tf.placeholder(tf.float32, name='beta_1') self.create_attack_ops(NET2, test_image.shape[-3],", "invalid!\") def give_simple_perturbation(self, attack_method, in_image): counterfactuals = self.create_counterfactuals(in_image) w, h, c = self.test_image.shape", "True if self.target_map is None: raise ValueError(\"No target region determined!\") else: perturbation =", "self.apply_perturb(test_image_pert, pert, alpha, epsilon) criterion = self.check_measure(test_image_pert, measure) if criterion < min_criterion: min_criterion", "sess.run(operation, feed_dict={ NET.input: feed, NET.label_ph: self.original_label, self.topk_ph: self.original_topk, self.mass_center_ph: self.original_mass_center, self.beta_0_ph: self.beta_0, self.beta_1_ph:", "0. last_image = None for counter in range(iters): pert = self.give_simple_perturbation(attack_method, test_image_pert) test_image_pert", "iteration of the attack measure: measure for success of the attack (one of", "= target_map self.original_output = self.NET2.predict(test_image[None, :]) counterfactuals = self.create_counterfactuals(test_image) self.saliency1, self.topK = self.run_model(", "pass if min_criterion == 1.: # print( # \"The attack was not successfull", "shape=[None, num_class], name='original_output_ph') # only for the manipulation attack self.create_attack_ops(self.NET2, test_image.shape[-3], test_image.shape[-2]) self.update_new_image(test_image,", "== 1.: return None predicted_scores = self.run_model(self.sess, self.NET.output, self.perturbed_image, self.NET) confidence = np.max(predicted_scores)", "feed_dict={ NET.input: [feed], NET.label_ph: self.original_label, NET.reference_image: self.reference_image, self.topk_ph: self.original_topk, self.mass_center_ph: self.original_mass_center, self.target_map_ph: self.target_map", "self.NET) if np.argmax(prob, 1) == self.original_label: counterfactuals = self.create_counterfactuals(test_image_pert) if measure == \"intersection\":", "perturbation = np.reshape(perturbation, [w, h, c]) elif attack_method == \"mass_center\": perturbation = self.run_model(self.sess,", "= np.array([ in_image + np.random.normal(scale=0.1 * (in_image.max() - in_image.min()), size=in_image.shape) for _ in", "criterion = scipy.spatial.distance.cosine( self.saliency1_flatten, saliency2_flatten) else: raise ValueError(\"Invalid measure!\") return criterion else: return", "1.: # print( # \"The attack was not successfull for maximum allowed perturbation", "original and perturbed images confidence: The prediction confidence of the perturbed image \"\"\"", "version. 
k_top: the topK parameter of the attack (refer to the original paper)", "maximum $ell_infty$ of perturbations, eg:8 iters: number of maximum allowed attack iterations alpha:", "range(iters): pert = self.give_simple_perturbation(attack_method, test_image_pert) test_image_pert = self.apply_perturb(test_image_pert, pert, alpha, epsilon) criterion =", "(target) attack beta_1: parameter for manipulate (target) attack Returns: intersection: The portion of", "beta_0: parameter for manipulate (target) attack beta_1: parameter for manipulate (target) attack Returns:", "tf.placeholder(tf.float32, name='beta_1') self.original_output = self.NET.predict(test_image[None, :]) _, num_class = self.original_output.shape self.original_output_ph = tf.placeholder(", "in_image, self.NET2) print(\"MSE: \", debug) perturbation = np.reshape(perturbation, [w, h, c]) return np.sign(perturbation)", "create_counterfactuals(self, in_image): counterfactuals = np.array([ in_image + np.random.uniform(-1, 1, size=in_image.shape) * self.radii for", "min_criterion == 1.: # print( # \"The attack was not successfull for maximum", "return X, y.astype(int), labels class SimpleGradientAttack(object): def __init__(self, mean_image, sess, test_image, original_label, NET,", "is the saliency map tensor and NET.saliency_flatten is its flatten version. NET2: Surrogate", "original paper) pixel_max: the maximum pixel value in the image. \"\"\" self.pixel_max =", "NET): if len(feed.shape) == 3: if hasattr(self, \"original_topk\") and hasattr( self, \"original_mass_center\"): if", "min_criterion: # print(\"attack\") min_criterion = criterion self.perturbed_image = test_image_pert.copy() perturb_size = np.max( np.abs(self.test_image", "= tf.placeholder(tf.float32, name='beta_1') self.original_output = self.NET.predict(test_image[None, :]) _, num_class = self.original_output.shape self.original_output_ph =", "self.perturbed_image)))) predicted_scores = self.run_model(self.sess, self.NET.output, self.perturbed_image, self.NET) confidence = np.max(predicted_scores) counterfactuals = self.create_counterfactuals(self.perturbed_image)", "\"\"\" self.pixel_max = pixel_max if len(test_image.shape) != 3: raise ValueError(\"Invalid Test Image Dimensions\")", "flatten version. 
k_top: the topK parameter of the attack (refer to the original", "feed, NET.label_ph: self.original_label, self.topk_ph: self.original_topk, self.mass_center_ph: self.original_mass_center }) else: return sess.run(operation, feed_dict={ NET.input:", "create_counterfactuals(self, in_image): ref_subtracted = in_image - self.reference_image counterfactuals = np.array([(float(i+1)/self.num_steps) * ref_subtracted +", "[feed], NET.label_ph: self.original_label, self.topk_ph: self.original_topk, self.mass_center_ph: self.original_mass_center, self.beta_0_ph: self.beta_0, self.beta_1_ph: self.beta_1, self.original_output_ph: self.original_output,", "= self.elements1 self.mass_center1 = self.run_model(self.sess, self.NET.mass_center, counterfactuals, self.NET).astype(int) self.original_mass_center = self.mass_center1 def create_extra_ops(self,", "self.test_image.shape test_image_pert = self.test_image.copy() self.original = self.test_image.copy() if attack_method == 'target': self.use_target =", "is None: self.mean_image = np.zeros_like(in_image) # out_image = self.test_image + np.clip( # in_image", "= self.run_model(self.sess, self.NET.top_idx, test_image_pert, self.NET) criterion = float(len(np.intersect1d(self.topK, top2))) / self.k_top elif measure", "return sess.run(operation, feed_dict={ NET.input: feed, NET.label_ph: self.original_label, }) else: raise RuntimeError(\"Input image shape", "gradients, ex: ReLU). It's assumed that NET.saliency is the saliency map tensor and", "self.NET.mass_center, counterfactuals, self.NET).astype(int) criterion = -np.linalg.norm(self.mass_center1 - center2) elif measure == \"cosine\": saliency2_flatten", "NET else: self.create_extra_ops(NET2, test_image.shape[-3], test_image.shape[-2], k_top) if sum([ NET2.input.get_shape()[-i] != test_image.shape[-i] for i", "test_image_pert, self.NET) criterion = scipy.stats.spearmanr(self.saliency1_flatten, saliency2_flatten)[0] elif measure == \"mass_center\": center2 = self.run_model(self.sess,", "= self.create_counterfactuals(in_image) w, h, c = self.test_image.shape if attack_method == \"random\": perturbation =", "return criterion else: return 1. 
import numpy as np
import tensorflow as tf
import random
import _pickle as pkl
import matplotlib.pyplot as plt
from pylab import rcParams
import scipy
import scipy.stats as stats
import scipy.spatial
from tensorflow.python.ops import gen_nn_ops

config_gpu = tf.ConfigProto()
config_gpu.gpu_options.allow_growth = True
MEAN_IMAGE = np.zeros((1, 227, 227, 3)).astype(np.float32)
MEAN_IMAGE[:, :, :, 0] = 103.939
MEAN_IMAGE[:, :, :, 1] = 116.779
MEAN_IMAGE[:, :, :, 2] = 123.68
EPSILON = 1e-12
MIN_INPUT = -MEAN_IMAGE
MAX_INPUT = 255 * np.ones_like(MEAN_IMAGE).astype(np.float32) - MEAN_IMAGE


def dataReader():
    X = np.zeros((100, 227, 227, 3))
    y = np.zeros(100)
    for num in range(4):
        with open("./ImagenetValidationSamples/imagenet_sample_{}.pkl".format(num), "rb") as inputs:
            dic_temp = pkl.load(inputs)
            X[num * 20:num * 20 + 20] = dic_temp["X"]
            y[num * 20:num * 20 + 20] = dic_temp["y"]
    return X, y


class SimpleGradientAttack(object):

    def __init__(self, mean_image, sess, test_image, original_label, NET,
                 NET2=None, k_top=1000, target_map=None, pixel_max=255.):
        """
        Args:
            mean_image: The mean image of the data set (the assumption is that the images are mean subtracted).
            sess: Session containing the model's (and the surrogate model's) graphs.
            test_image: Mean subtracted test image.
            original_label: True label of the image.
            NET: Original neural network. It is assumed that NET.saliency is the saliency map tensor and
                NET.saliency_flatten is its flattened version.
            NET2: Surrogate neural network with the same structure and weights as the original network, but with
                activations replaced by the softplus function (necessary only when the activation function of the
                original network does not have second-order gradients, e.g. ReLU). It is assumed that NET2.saliency
                is the saliency map tensor and NET2.saliency_flatten is its flattened version.
            k_top: the top-K parameter of the attack (refer to the original paper).
            pixel_max: the maximum pixel value in the image.
        """
        self.pixel_max = pixel_max
        if len(test_image.shape) != 3:
            raise ValueError("Invalid Test Image Dimensions")
        if NET.input.get_shape()[-3] != test_image.shape[-3] or \
                NET.input.get_shape()[-2] != test_image.shape[-2] or \
                NET.input.get_shape()[-1] != test_image.shape[-1]:
            raise ValueError("Model's input dimensions are not compatible with the provided test image!")
        if NET2 is None:
            NET2 = NET
        else:
            self.create_extra_ops(NET2, test_image.shape[-3], test_image.shape[-2], k_top)
            if NET2.input.get_shape()[-3] != test_image.shape[-3] or \
                    NET2.input.get_shape()[-2] != test_image.shape[-2] or \
                    NET2.input.get_shape()[-1] != test_image.shape[-1]:
                raise ValueError("Surrogate model's input dimensions are not compatible with the provided test image!")
        if self.check_prediction(sess, original_label, test_image, NET):
            return
        self.sess = sess
        self.target_map = target_map
        self.create_extra_ops(NET, test_image.shape[-3], test_image.shape[-2], k_top)
        self.NET = NET
        self.NET2 = NET2
        self.test_image = test_image
        self.original_label = original_label
        self.mean_image = mean_image
        self.k_top = k_top
        w, h, c = self.mean_image.shape
        self.topk_ph = tf.placeholder(tf.float32, shape=[w * h], name='topk_ph')
        self.mass_center_ph = tf.placeholder(tf.float32, shape=[2], name='mass_center_ph')
        self.target_map_ph = tf.placeholder(tf.float32, shape=[w, h], name='target_map_ph')
        self.beta_0_ph = tf.placeholder(tf.float32, name='beta_0')
        self.beta_1_ph = tf.placeholder(tf.float32, name='beta_1')
        self.original_output = self.NET.predict(test_image[None, :])
        _, num_class = self.original_output.shape
        self.original_output_ph = tf.placeholder(tf.float32, shape=[None, num_class],
                                                 name='original_output_ph')  # only for the manipulation attack
        self.create_attack_ops(self.NET2, test_image.shape[-3], test_image.shape[-2])
        self.update_new_image(test_image, original_label)

    def update_new_image(self, test_image, original_label, target_map=None):
        w, h, c = test_image.shape
        self.test_image = test_image
        self.original_label = original_label
        assert self.check_prediction(self.sess, original_label, test_image, self.NET) is False
        if target_map is not None:
            self.target_map = target_map
        self.original_output = self.NET2.predict(test_image[None, :])
        self.saliency1, self.topK = self.run_model(
            self.sess, [self.NET.saliency, self.NET.top_idx], self.test_image, self.NET)
        self.saliency1_flatten = np.reshape(
            self.saliency1, [test_image.shape[-3] * test_image.shape[-2]])
        elem1 = np.argsort(np.reshape(self.saliency1, [w * h]))[-self.k_top:]
        self.elements1 = np.zeros(w * h)
        self.elements1[elem1] = 1
        self.original_topk = self.elements1
        self.mass_center1 = self.run_model(self.sess, self.NET.mass_center, self.test_image, self.NET).astype(int)
        self.original_mass_center = self.mass_center1

    def check_prediction(self, sess, original_label, image, NET):
        """If the network's prediction is incorrect in the first place, attacking has no meaning."""
        predicted_scores = sess.run(
            NET.output,
            feed_dict={NET.input: image if len(image.shape) == 4 else [image]})
        if np.argmax(predicted_scores, 1) != original_label:
            print("Network's Prediction is Already Incorrect!")
            return True
        else:
            self.original_confidence = np.max(predicted_scores)
            return False

    def create_extra_ops(self, NET, w, h, k_top):
        top_val, NET.top_idx = tf.nn.top_k(NET.saliency_flatten, k_top)
        y_mesh, x_mesh = np.meshgrid(np.arange(h), np.arange(w))
        NET.mass_center = tf.stack([
            tf.reduce_sum(NET.saliency * x_mesh) / (w * h),
            tf.reduce_sum(NET.saliency * y_mesh) / (w * h)
        ])

    def create_attack_ops(self, NET, w, h):
        topK_loss = tf.reduce_sum((NET.saliency_flatten * self.topk_ph))
        self.topK_direction = -tf.gradients(topK_loss, NET.input)[0]
        mass_center_loss = -tf.reduce_sum((NET.mass_center - self.mass_center_ph) ** 2)
        self.mass_center_direction = -tf.gradients(mass_center_loss, NET.input)[0]
        if self.target_map is not None:
            target_dis = tf.keras.losses.MSE(self.target_map_ph, NET.saliency)
            output_dis = tf.keras.losses.MSE(self.original_output_ph, NET.output)
            target_loss = tf.reduce_mean(target_dis) * self.beta_0_ph + self.beta_1_ph * tf.reduce_mean(output_dis)
            self.debug = target_loss
            self.target_direction = -tf.gradients(target_loss, NET.input)[0]

    def run_model(self, sess, operation, feed, NET):
        """Runs an op with the appropriate feed_dict for a single image (3-d feed) or a batch (4-d feed)."""
        if len(feed.shape) == 3:
            feed_dict = {NET.input: [feed], NET.label_ph: self.original_label}
        elif len(feed.shape) == 4:
            feed_dict = {NET.input: feed, NET.label_ph: self.original_label}
        else:
            raise RuntimeError("Input image shape invalid!")
        if hasattr(self, "original_topk") and hasattr(self, "original_mass_center"):
            feed_dict[self.topk_ph] = self.original_topk
            feed_dict[self.mass_center_ph] = self.original_mass_center
            if hasattr(self, "use_target") and self.use_target:
                feed_dict[self.beta_0_ph] = self.beta_0
                feed_dict[self.beta_1_ph] = self.beta_1
                feed_dict[self.original_output_ph] = self.original_output
        if self.target_map is not None:
            feed_dict[self.target_map_ph] = self.target_map
        return sess.run(operation, feed_dict=feed_dict)

    def give_simple_perturbation(self, attack_method, in_image):
        w, h, c = self.test_image.shape
        if attack_method == "random":
            perturbation = np.random.normal(size=(w, h, c))
        elif attack_method == "topK":
            perturbation = self.run_model(self.sess, self.topK_direction, in_image, self.NET2)
            perturbation = np.reshape(perturbation, [w, h, c])
        elif attack_method == "mass_center":
            perturbation = self.run_model(self.sess, self.mass_center_direction, in_image, self.NET2)
            perturbation = np.reshape(perturbation, [w, h, c])
        elif attack_method == "target":
            if self.target_map is None:
                raise ValueError("No target region determined!")
            perturbation = self.run_model(self.sess, self.target_direction, in_image, self.NET2)
            debug = self.run_model(self.sess, self.debug, in_image, self.NET2)
            print("MSE: ", debug)
            perturbation = np.reshape(perturbation, [w, h, c])
        return np.sign(perturbation)

    def apply_perturb(self, in_image, pert, alpha, bound=8 / 255, ord=np.inf):
        if self.mean_image is None:
            self.mean_image = np.zeros_like(in_image)
        d = in_image + alpha * np.sign(pert) - self.test_image
        d_norm = np.linalg.norm(d.flatten(), ord=ord)
        if d_norm > bound:
            proj_ratio = bound / np.linalg.norm(d.flatten(), ord=ord)
        else:
            proj_ratio = 1
        out_image = self.test_image + d * proj_ratio
        out_image = np.clip(out_image, -self.mean_image, self.pixel_max - self.mean_image)
        return out_image

    def check_measure(self, test_image_pert, measure):
        prob = self.run_model(self.sess, self.NET.output, test_image_pert, self.NET)
        if np.argmax(prob, 1) == self.original_label:
            if measure == "intersection":
                top2 = self.run_model(self.sess, self.NET.top_idx, test_image_pert, self.NET)
                criterion = float(len(np.intersect1d(self.topK, top2))) / self.k_top
            elif measure == "correlation":
                saliency2_flatten = self.run_model(self.sess, self.NET.saliency_flatten, test_image_pert, self.NET)
                criterion = scipy.stats.spearmanr(self.saliency1_flatten, saliency2_flatten)[0]
            elif measure == "mass_center":
                center2 = self.run_model(self.sess, self.NET.mass_center, test_image_pert, self.NET).astype(int)
                criterion = -np.linalg.norm(self.mass_center1 - center2)
            elif measure == "cosine":
                saliency2_flatten = self.run_model(self.sess, self.NET.saliency_flatten, test_image_pert, self.NET)
                criterion = scipy.spatial.distance.cosine(self.saliency1_flatten, saliency2_flatten)
            else:
                raise ValueError("Invalid measure!")
            return criterion
        else:
            return 1.

    def iterative_attack(self, attack_method, epsilon, iters=100, alpha=1, beta_0=1e11, beta_1=1e6,
                         measure="intersection"):
        """
        Args:
            attack_method: One of "mass_center", "topK" or "random".
            epsilon: Allowed maximum L-infinity norm of the perturbation, e.g. 8.
            iters: Maximum number of attack iterations.
            alpha: Perturbation size in each iteration of the attack.
            measure: Measure of attack success (one of "correlation", "mass_center" or "intersection").
            beta_0, beta_1: Parameters of the manipulate (target) attack.
        Returns:
            intersection: The portion of the top-K salient pixels of the original image that remain in the
                top-K salient pixels of the perturbed image, divided by K.
            correlation: The rank correlation between the saliency maps of the original and perturbed images.
            center_dislocation: The L2 distance between the saliency map mass centers of the original and
                perturbed images.
            confidence: The prediction confidence of the perturbed image.
        """
        self.beta_0 = beta_0
        self.beta_1 = beta_1
        w, h, c = self.test_image.shape
        test_image_pert = self.test_image.copy()
        self.original = self.test_image.copy()
        if attack_method == 'target':
            self.use_target = True
        else:
            self.use_target = False
        min_criterion = 1.
        perturb_size = 0.
        for counter in range(iters):
            pert = self.give_simple_perturbation(attack_method, test_image_pert)
            test_image_pert = self.apply_perturb(test_image_pert, pert, alpha, epsilon)
            criterion = self.check_measure(test_image_pert, measure)
            if criterion < min_criterion:
                min_criterion = criterion
                self.perturbed_image = test_image_pert.copy()
                perturb_size = np.max(np.abs(self.test_image - self.perturbed_image))
        if min_criterion == 1.:
            # The attack never changed the interpretation without changing the label.
            return None
        predicted_scores = self.run_model(self.sess, self.NET.output, self.perturbed_image, self.NET)
        confidence = np.max(predicted_scores)
        self.saliency2, self.top2, self.mass_center2 = self.run_model(
            self.sess, [self.NET.saliency, self.NET.top_idx, self.NET.mass_center], self.perturbed_image, self.NET)
        correlation = scipy.stats.spearmanr(self.saliency1_flatten, np.reshape(self.saliency2, [w * h]))[0]
        intersection = float(len(np.intersect1d(self.topK, self.top2))) / self.k_top
        center_dislocation = np.linalg.norm(self.mass_center1 - self.mass_center2.astype(int))
        cos_distance = scipy.spatial.distance.cosine(self.saliency1_flatten, np.reshape(self.saliency2, [w * h]))
        return intersection, correlation, center_dislocation, confidence, perturb_size, cos_distance


class IntegratedGradientsAttack(object):
    # As SimpleGradientAttack, but the saliency, top-K and mass-center statistics are evaluated over the
    # `num_steps` interpolations between `reference_image` and the input (Integrated Gradients).

    def create_counterfactuals(self, in_image):
        ref_subtracted = in_image - self.reference_image
        counterfactuals = np.array([(float(i + 1) / self.num_steps) * ref_subtracted + self.reference_image
                                    for i in range(self.num_steps)])
        return np.array(counterfactuals)


class SmoothGradientsAttack(object):
    # As SimpleGradientAttack, but the saliency, top-K and mass-center statistics are averaged over
    # `num_steps` noisy copies of the input (SmoothGrad).

    def create_counterfactuals(self, in_image, noise_ratio=0.1):
        counterfactuals = np.array([
            in_image + np.random.normal(scale=0.1 * (in_image.max() - in_image.min()), size=in_image.shape)
            for _ in range(self.num_steps)
        ])
        return np.array(counterfactuals)


class UniGradientsAttack(SmoothGradientsAttack):

    def __init__(self, sess, mean_image, test_image, original_label, NET, NET2=None, k_top=1000,
                 num_steps=100, radii=4, reference_image=None, target_map=None, pixel_max=255.):
        self.radii = radii / (255. / pixel_max)
        super(UniGradientsAttack, self).__init__(sess, mean_image, test_image, original_label, NET,
                                                 NET2=NET2, k_top=1000, num_steps=num_steps,
                                                 reference_image=reference_image, target_map=target_map,
                                                 pixel_max=255.)

    def create_counterfactuals(self, in_image):
        counterfactuals = np.array([
            in_image + np.random.uniform(-1, 1, size=in_image.shape) * self.radii
            for _ in range(self.num_steps)
        ])
        return np.array(counterfactuals)
self.NET).astype(int) self.original_mass_center = self.mass_center1 def check_prediction(self, sess, original_label, image, NET): \"\"\"", "give_simple_perturbation(self, attack_method, in_image): w, h, c = self.test_image.shape if attack_method == \"random\": perturbation", "* h])) return intersection, correlation, center_dislocation, confidence, perturb_size, cos_distance class UniGradientsAttack(SmoothGradientsAttack): def __init__(self,", "that are in the top K salient pixels of the perturbed image devided", "self.create_extra_ops(NET, test_image.shape[-3], test_image.shape[-2], k_top) if NET2 is None: NET2 = NET else: self.create_extra_ops(NET2,", "h], name='target_map_ph') self.original_output = self.NET.predict(test_image[None, :]) _, num_class = self.original_output.shape self.original_output_ph = tf.placeholder(", "beta_1 w, h, c = self.test_image.shape test_image_pert = self.test_image.copy() min_criterion = 1. perturb_size", "success of the attack (one of \"correlation\", \"mass_center\" or \"intersection\") beta_0: parameter for", "= self.mass_center1 def create_extra_ops(self, NET, w, h, k_top): top_val, NET.top_idx = tf.nn.top_k(NET.saliency_flatten, k_top)", "= self.NET2.predict(test_image[None, :]) counterfactuals = self.create_counterfactuals(test_image) self.saliency1, self.topK = self.run_model( self.sess, [self.NET.saliency, self.NET.top_idx],", "criterion else: return 1. def iterative_attack(self, attack_method, epsilon, iters=100, alpha=1, beta_0=1e11, beta_1=1e6, measure=\"intersection\"):", "self.topk_ph: self.original_topk, self.mass_center_ph: self.original_mass_center }) else: return sess.run(operation, feed_dict={ NET.input: [feed], NET.label_ph: self.original_label,", "self.original_confidence, 0. return None # print( # '''For maximum allowed perturbation size equal", "3)) y = np.zeros(100) for num in range(4): with open( \"./ImagenetValidationSamples/imagenet_sample_{}.pkl\".format( num), \"rb\")", "original_label: print(\"Network's Prediction is Already Incorrect!\") return True else: self.original_confidence = np.max(predicted_scores) return", "manipulation attack self.create_attack_ops(self.NET2, test_image.shape[-3], test_image.shape[-2]) def check_prediction(self, sess, original_label, image, NET): \"\"\" If", "np.max(np.abs(self.test_image - self.perturbed_image)))) predicted_scores = self.run_model(self.sess, self.NET.output, self.perturbed_image, self.NET) confidence = np.max(predicted_scores) counterfactuals", "== \"mass_center\": center2 = self.run_model(self.sess, self.NET.mass_center, test_image_pert, self.NET).astype(int) criterion = -np.linalg.norm(self.mass_center1 - center2)", "self.NET.top_idx], self.test_image, self.NET) self.saliency1_flatten = np.reshape( self.saliency1, [test_image.shape[-3] * test_image.shape[-2]]) elem1 = np.argsort(np.reshape(self.saliency1,", "= in_image + alpha * pert - self.test_image d_norm = np.linalg.norm(d.flatten(), ord=ord) if", "of the perturbed image \"\"\" self.beta_0 = beta_0 self.beta_1 = beta_1 w, h,", "not successfull for maximum allowed perturbation size equal to {}\" # .format(epsilon)) #", "measure=\"intersection\", target=None): \"\"\" Args: attack_method: One of \"mass_center\", \"topK\" or \"random\" epsilon: Allowed", "% int(iters / 5) == 0: # print(\"Iteration : {}\".format(counter)) pert = self.give_simple_perturbation(attack_method,", "print( # \"The attack was not successfull for maximum allowed perturbation size equal", "the input image \"\"\" self.pixel_max = pixel_max if 
len(test_image.shape) != 3: raise ValueError(\"Invalid", "the network's prediction is incorrect in the first place, attacking has no meaning.\"\"\"", "103.939 MEAN_IMAGE[:, :, :, 1] = 116.779 MEAN_IMAGE[:, :, :, 2] = 123.68", "-tf.gradients(target_loss, NET.input)[0] def create_counterfactuals(self, in_image): ref_subtracted = in_image - self.reference_image counterfactuals = np.array([(float(i+1)/self.num_steps)", "NET.topK_direction = -tf.gradients(topK_loss, NET.input)[0] mass_center_loss = -tf.reduce_sum( (NET.mass_center - self.mass_center_ph)**2) NET.mass_center_direction = -tf.gradients(mass_center_loss,", "cos_distance class UniGradientsAttack(SmoothGradientsAttack): def __init__(self, sess, mean_image, test_image, original_label, NET, NET2=None, k_top=1000, num_steps=100,", "original_label, image, NET): \"\"\" If the network's prediction is incorrect in the first", "perturb_size = np.max( np.abs(self.test_image - self.perturbed_image)) else: pass if criterion == 1.: return", "counter % int(iters / 5) == 0: # print(\"Iteration : {}\".format(counter)) pert =", "(necessary only when the activation function of the original function does not have", "test_image.shape[-2]]) elem1 = np.argsort(np.reshape(self.saliency1, [w * h]))[-self.k_top:] self.elements1 = np.zeros(w * h) self.elements1[elem1]", "h) ]) def create_attack_ops(self, NET, w, h): topK_loss = tf.reduce_sum((NET.saliency_flatten * self.topk_ph)) self.debug", "perturbation = np.reshape(perturbation, [self.num_steps, w, h, c]) perturbation_summed = np.sum(np.array([float(i+1)/self.num_steps*perturbation[i]\\ for i in", "each iteration of the attack measure: measure for success of the attack (one", "original_label, test_image, NET): return self.sess = sess self.target_map = target_map self.create_extra_ops(NET, test_image.shape[-3], test_image.shape[-2],", "np.reshape(perturbation, [self.num_steps, w, h, c]) elif attack_method == \"target\": if self.target_map is None:", "/ pixel_max) super(UniGradientsAttack, self).__init__(sess, mean_image, test_image, original_label, NET, NET2=NET2, k_top=1000, num_steps=num_steps, reference_image=reference_image, target_map=target_map,", "are mean subtracted) sess: Session containing model(and surrogate model's) graphs test_image: Mean subtracted", "- self.test_image d_norm = np.linalg.norm(d.flatten(), ord=ord) if d_norm > bound: proj_ratio = bound", "self.saliency1_flatten = np.reshape( self.saliency1, [test_image.shape[-3] * test_image.shape[-2]]) elem1 = np.argsort(np.reshape(self.saliency1, [w * h]))[-self.k_top:]", "self.test_image d_norm = np.linalg.norm(d.flatten(), ord=ord) if d_norm > bound: proj_ratio = bound /", "227, 3)).astype(np.float32) MEAN_IMAGE[:, :, :, 0] = 103.939 MEAN_IMAGE[:, :, :, 1] =", "= criterion self.perturbed_image = test_image_pert.copy() perturb_size = np.max( np.abs(self.test_image - self.perturbed_image)) else: #", "= tf.ConfigProto() config_gpu.gpu_options.allow_growth = True MEAN_IMAGE = np.zeros((1, 227, 227, 3)).astype(np.float32) MEAN_IMAGE[:, :,", "-tf.gradients(target_loss, NET.input)[0] def run_model(self, sess, operation, feed, NET): if len(feed.shape) == 3: if", "d_norm > bound: proj_ratio = bound / np.linalg.norm(d.flatten(), ord=ord) else: proj_ratio = 1", "self.check_measure(test_image_pert, measure) if criterion < min_criterion: # print(\"attack\") min_criterion = criterion self.perturbed_image =", "= self.run_model(self.sess, self.NET.output, self.perturbed_image, self.NET) confidence = np.max(predicted_scores) counterfactuals = 
self.create_counterfactuals(self.perturbed_image) self.saliency2, self.top2,", "criterion = scipy.stats.spearmanr(self.saliency1_flatten, saliency2_flatten)[0] elif measure == \"mass_center\": center2 = self.run_model(self.sess, self.NET.mass_center, counterfactuals,", "network but with activations replaced by softplus function (necessary only when the activation", "= self.test_image.shape if attack_method == \"random\": perturbation = np.random.normal(size=(w, h, c)) elif attack_method", "[self.num_steps, w, h, c]) elif attack_method == \"target\": self.use_target = True if self.target_map", "counterfactuals, self.NET) correlation = scipy.stats.spearmanr( self.saliency1_flatten, np.reshape(self.saliency2, [w * h]))[0] intersection = float(len(np.intersect1d(self.topK,", "sess.run(operation, feed_dict={ NET.input: feed, NET.label_ph: self.original_label, self.topk_ph: self.original_topk, self.mass_center_ph: self.original_mass_center, self.target_map_ph: self.target_map })", "[w, h, c]) elif attack_method == \"mass_center\": perturbation = self.run_model(self.sess, self.mass_center_direction, in_image, self.NET2)", "function of the original function does not have second order gradients, ex: ReLU).", "(255. / pixel_max) super(UniGradientsAttack, self).__init__(sess, mean_image, test_image, original_label, NET, NET2=NET2, k_top=1000, num_steps=num_steps, reference_image=reference_image,", "Test Image Dimensions\") if NET.input.get_shape()[-3]!=test_image.shape[-3] or NET.input.get_shape()[-2]!=test_image.shape[-2] or\\ NET.input.get_shape()[-1]!=test_image.shape[-1]: raise ValueError( \"Model's input", "else: self.use_target = False self.beta_0 = beta_0 self.beta_1 = beta_1 min_criterion = 1.", "or\\ NET2.input.get_shape()[-1]!=test_image.shape[-1]: raise ValueError( \"Surrogate model's input dimensions is not Compatible with the", "portion of the top K salient pixels in the original picture that are", "shape=[w, h], name='target_map_ph') self.original_output = self.NET.predict(test_image[None, :]) _, num_class = self.original_output.shape self.original_output_ph =", "MEAN_IMAGE[:, :, :, 2] = 123.68 EPSILON = 1e-12 MIN_INPUT = -MEAN_IMAGE MAX_INPUT", "NET.input: [feed], NET.label_ph: self.original_label, NET.reference_image: self.reference_image, self.topk_ph: self.original_topk, self.mass_center_ph: self.original_mass_center, self.target_map_ph: self.target_map })", "w, h): topK_loss = tf.reduce_sum((NET.saliency_flatten * self.topk_ph)) self.debug = topK_loss NET.topK_direction = -tf.gradients(topK_loss,", "self.beta_1_ph = tf.placeholder(tf.float32, name='beta_1') self.create_attack_ops(NET2, test_image.shape[-3], test_image.shape[-2]) self.update_new_image(test_image, original_label) def update_new_image(self, test_image, original_label,", "tensorflow as tf import random import _pickle as pkl import matplotlib.pyplot as plt", "\"topK\": perturbation = self.run_model(self.sess, self.NET2.topK_direction, counterfactuals, self.NET2) perturbation = np.reshape(perturbation, [self.num_steps, w, h,", "= tf.placeholder(tf.float32, name='beta_0') self.beta_1_ph = tf.placeholder(tf.float32, name='beta_1') self.create_attack_ops(NET2, test_image.shape[-3], test_image.shape[-2]) self.update_new_image(test_image, original_label) def", "second order gradients, ex: ReLU). 
It's assumed that NET.saliency is the saliency map", "def __init__(self, mean_image, sess, test_image, original_label, NET, NET2=None, k_top=1000, target_map=None, pixel_max=255.): \"\"\" Args:", "== 1.: # print( # \"The attack was not successfull for maximum allowed", "counterfactuals, self.NET) criterion = float(len(np.intersect1d(self.topK, top2))) / self.k_top elif measure == \"correlation\": saliency2_flatten", "tf.placeholder(tf.float32, shape=[w, h], name='target_map_ph') self.beta_0_ph = tf.placeholder(tf.float32, name='beta_0') self.beta_1_ph = tf.placeholder(tf.float32, name='beta_1') self.original_output", "attack (one of \"correlation\", \"mass_center\" or \"intersection\") beta_0: parameter for manipulate (target) attack", "original_label, NET, NET2=None, k_top=1000, num_steps=100, reference_image=None, target_map=None, pixel_max=255.): \"\"\" Args: mean_image: The mean", "def create_counterfactuals(self, in_image): ref_subtracted = in_image - self.reference_image counterfactuals = np.array([(float(i+1)/self.num_steps) * ref_subtracted", "= target_map self.original_output = self.NET2.predict(test_image[None, :]) self.saliency1, self.topK = self.run_model( self.sess, [self.NET.saliency, self.NET.top_idx],", "target_loss self.target_direction = -tf.gradients(target_loss, NET.input)[0] def create_counterfactuals(self, in_image, noise_ratio=0.1): counterfactuals = np.array([ in_image", "distance between saliency map mass centers in original and perturbed images confidence: The", "np.reshape(perturbation, [self.num_steps, w, h, c]) perturbation_summed = np.mean(perturbation, 0) return np.sign(perturbation_summed) def apply_perturb(self,", "in the original picture that are in the top K salient pixels of", "or \"random\" epsilon: Allowed maximum $ell_infty$ of perturbations, eg:8 iters: number of maximum", "bound / np.linalg.norm(d.flatten(), ord=ord) else: proj_ratio = 1 out_image = self.test_image + d", "y = np.zeros(100) for num in range(4): with open( \"./ImagenetValidationSamples/imagenet_sample_{}.pkl\".format( num), \"rb\") as", "the manipulation attack self.create_attack_ops(self.NET2, test_image.shape[-3], test_image.shape[-2]) def check_prediction(self, sess, original_label, image, NET): \"\"\"", "Gradients Algorithm reference_image: not used pixel_max: maximum pixel value in the input image", "[w * h])) return intersection, correlation, center_dislocation, confidence, perturb_size, cos_distance class SmoothGradientsAttack(object): def", "reference image of Integrated Gradients Algorithm pixel_max: the maximum pixel value in the", "h): topK_loss = tf.reduce_sum((NET.saliency_flatten * self.topk_ph)) self.debug = topK_loss NET.topK_direction = -tf.gradients(topK_loss, NET.input)[0]", "= self.check_measure(test_image_pert, measure) if criterion < min_criterion: min_criterion = criterion self.perturbed_image = test_image_pert.copy()", "np.reshape(self.saliency2, [w * h])) return intersection, correlation, center_dislocation, confidence, perturb_size, cos_distance class UniGradientsAttack(SmoothGradientsAttack):", "k_top=1000, num_steps=100, radii=4, reference_image=None, target_map=None, pixel_max=255.): self.radii = radii / (255. 
/ pixel_max)", "h): topK_loss = tf.reduce_sum((NET.saliency_flatten * self.topk_ph)) self.topK_direction = -tf.gradients(topK_loss, NET.input)[0] mass_center_loss = -tf.reduce_sum(", "class SmoothGradientsAttack(object): def __init__(self, sess, mean_image, test_image, original_label, NET, NET2=None, k_top=1000, num_steps=100, reference_image=None,", "set(The assumption is that the images are mean subtracted) sess: Session containing model(and", "counterfactuals, self.NET2) perturbation = np.reshape(perturbation, [self.num_steps, w, h, c]) perturbation_summed = np.mean(perturbation, 0)", "\"mass_center\": perturbation = self.run_model(self.sess, self.NET2.mass_center_direction, counterfactuals, self.NET2) perturbation = np.reshape(perturbation, [self.num_steps, w, h,", "Already Incorrect!\") return True else: self.original_confidence = np.max(predicted_scores) return False def create_extra_ops(self, NET,", "\"use_target\") and self.use_target: return sess.run(operation, feed_dict={ NET.input: feed, NET.label_ph: self.original_label, self.topk_ph: self.original_topk, self.mass_center_ph:", "= scipy.spatial.distance.cosine( self.saliency1_flatten, np.reshape(self.saliency2, [w * h])) return intersection, correlation, center_dislocation, confidence, perturb_size,", "if self.target_map is None: raise ValueError(\"No target region determined!\") else: perturbation = self.run_model(self.sess,", "test_image_pert, measure): prob = self.run_model(self.sess, self.NET.output, test_image_pert, self.NET) if np.argmax(prob, 1) == self.original_label:", "center_dislocation, confidence, perturb_size, cos_distance class SmoothGradientsAttack(object): def __init__(self, sess, mean_image, test_image, original_label, NET,", "output_dis) self.debug = target_loss self.target_direction = -tf.gradients(target_loss, NET.input)[0] def create_counterfactuals(self, in_image): ref_subtracted =", "self.original_mass_center, self.target_map_ph: self.target_map }) else: return sess.run(operation, feed_dict={ NET.input: [feed], NET.reference_image: self.reference_image, NET.label_ph:", "* 20:num * 20 + 20] = dic_temp[\"y\"] labels = dic_temp[\"labels\"] return X,", "tf.reduce_sum(NET.saliency * y_mesh) / (w * h) ]) def create_attack_ops(self, NET, w, h):", "as inputs: dic_temp = pkl.load(inputs) X[num * 20:num * 20 + 20] =", "weights of the orignal network but with activations replaced by softplus function (necessary", "test_image, original_label, NET, NET2=None, k_top=1000, num_steps=100, reference_image=None, target_map=None, pixel_max=255.): \"\"\" Args: mean_image: The", "shape=[None, num_class], name='original_output_ph') # only for the manipulation attack self.create_attack_ops(self.NET2, test_image.shape[-3], test_image.shape[-2]) def", "Original neural network. 
It's assumed that NET.saliency is the saliency map tensor and", "w, h, c]) elif attack_method == \"target\": self.use_target = True if self.target_map is", "np.random.normal(size=(w, h, c)) elif attack_method == \"topK\": perturbation = self.run_model(self.sess, self.topK_direction, in_image, self.NET2)", "the original picture that are in the top K salient pixels of the", "mass_center_loss = -tf.reduce_sum( (NET.mass_center - self.mass_center_ph)**2) NET.mass_center_direction = -tf.gradients(mass_center_loss, NET.input)[0] if self.target_map is", "tf.keras.losses.MSE(self.target_map_ph, NET.saliency) output_dis = tf.keras.losses.MSE(self.original_output_ph, NET.output) target_loss = tf.reduce_mean( target_dis) * self.beta_0_ph +", "True else: self.original_confidence = np.max(predicted_scores) return False def create_extra_ops(self, NET, w, h, k_top):", "bound: proj_ratio = bound / np.linalg.norm(d.flatten(), ord=ord) else: proj_ratio = 1 out_image =", "attack_method == \"target\": self.use_target = True if self.target_map is None: raise ValueError(\"No target", "= tf.placeholder(tf.float32, shape=[w, h], name='target_map_ph') self.original_output = self.NET.predict(test_image[None, :]) _, num_class = self.original_output.shape", "self.run_model(self.sess, self.target_direction, counterfactuals, self.NET2) perturbation = np.reshape(perturbation, [self.num_steps, w, h, c]) perturbation_summed =", "self.NET).astype(int) self.original_mass_center = self.mass_center1 def create_extra_ops(self, NET, w, h, k_top): top_val, NET.top_idx =", "self.reference_image, NET.label_ph: self.original_label, }) else: raise RuntimeError(\"Input image shape invalid!\") def give_simple_perturbation(self, attack_method,", "True else: self.use_target = False self.beta_0 = beta_0 self.beta_1 = beta_1 min_criterion =", "= np.zeros(100) for num in range(4): with open( \"./ImagenetValidationSamples/imagenet_sample_{}.pkl\".format( num), \"rb\") as inputs:", "if hasattr(self, \"use_target\") and self.use_target: return sess.run(operation, feed_dict={ NET.input: [feed], NET.label_ph: self.original_label, self.topk_ph:", "return False def create_extra_ops(self, NET, w, h, k_top): top_val, NET.top_idx = tf.nn.top_k(NET.saliency_flatten, k_top)", "def create_counterfactuals(self, in_image, noise_ratio=0.1): counterfactuals = np.array([ in_image + np.random.normal(scale=0.1 * (in_image.max() -", "IntegratedGradientsAttack(object): def __init__(self, sess, mean_image, test_image, original_label, NET, NET2=None, k_top=1000, num_steps=100, reference_image=None, target_map=None,", "It's assumed that NET.saliency is the saliency map tensor and NET.saliency_flatten is its", "\"\"\" Args: attack_method: One of \"mass_center\", \"topK\" or \"random\" epsilon: Allowed maximum $ell_infty$", "criterion else: return 1 def iterative_attack(self, attack_method, epsilon, iters=100, alpha=1, beta_0=1e11, beta_1=1e6, measure=\"intersection\"):", "center2) elif measure == \"cosine\": saliency2_flatten = self.run_model(self.sess, self.NET.saliency_flatten, test_image_pert, self.NET) criterion =", "that NET.saliency is the saliency map tensor and NET.saliency_flatten is its flatten version.", "sess.run(operation, feed_dict={ NET.input: feed, NET.label_ph: self.original_label, self.topk_ph: self.original_topk, self.mass_center_ph: self.original_mass_center }) else: return", "# out_image = self.test_image + np.clip( # in_image + alpha * np.sign(pert) -", "create_extra_ops(self, NET, w, h, k_top): top_val, NET.top_idx = 
tf.nn.top_k(NET.saliency_flatten, k_top) y_mesh, x_mesh =", "self.topk_ph: self.original_topk, NET.reference_image: self.reference_image, self.mass_center_ph: self.original_mass_center, self.beta_0_ph: self.beta_0, self.beta_1_ph: self.beta_1, self.original_output_ph: self.original_output, self.target_map_ph:", ") self.NET = NET self.NET2 = NET2 self.test_image = test_image self.original_label = original_label", "self.NET) criterion = float(len(np.intersect1d(self.topK, top2))) / self.k_top elif measure == \"correlation\": saliency2_flatten =", "return intersection, correlation, center_dislocation, confidence, perturb_size, cos_distance class UniGradientsAttack(SmoothGradientsAttack): def __init__(self, sess, mean_image,", "self.use_target = True if self.target_map is None: raise ValueError(\"No target region determined!\") else:", "measure): prob = self.run_model(self.sess, self.NET.output, test_image_pert, self.NET) if np.argmax(prob, 1) == self.original_label: counterfactuals", "criterion self.perturbed_image = test_image_pert.copy() perturb_size = np.max( np.abs(self.test_image - self.perturbed_image)) else: pass if", "sess.run(operation, feed_dict={ NET.input: feed, NET.label_ph: self.original_label, NET.reference_image: self.reference_image, self.topk_ph: self.original_topk, self.mass_center_ph: self.original_mass_center, self.target_map_ph:", "self.NET.saliency_flatten, test_image_pert, self.NET) criterion = scipy.stats.spearmanr(self.saliency1_flatten, saliency2_flatten)[0] elif measure == \"mass_center\": center2 =", "perturbation size was equal to {} # '''.format(epsilon, # np.max(np.abs(self.test_image - self.perturbed_image)))) predicted_scores", "reference_image=None, target_map=None, pixel_max=255.): \"\"\" Args: mean_image: The mean image of the data set(The", "= np.zeros((1, 227, 227, 3)).astype(np.float32) MEAN_IMAGE[:, :, :, 0] = 103.939 MEAN_IMAGE[:, :,", "= self.run_model(self.sess, self.target_direction, counterfactuals, self.NET2) perturbation = np.reshape(perturbation, [self.num_steps, w, h, c]) perturbation_summed", "proj_ratio = 1 out_image = self.test_image + d * proj_ratio out_image = np.clip(out_image,", "beta_1: parameter for manipulate (target) attack Returns: intersection: The portion of the top", "< min_criterion: # print(\"attack\") min_criterion = criterion self.perturbed_image = test_image_pert.copy() perturb_size = np.max(", "np.sign(perturbation_summed) def apply_perturb(self, in_image, pert, alpha, bound=8 / 255, ord=np.inf): if self.mean_image is", "in_image.min()), size=in_image.shape) for _ in range(self.num_steps) ]) return np.array(counterfactuals) def run_model(self, sess, operation,", "self.NET) self.saliency1_flatten = np.reshape( self.saliency1, [test_image.shape[-3] * test_image.shape[-2]]) elem1 = np.argsort(np.reshape(self.saliency1, [w *", "in range(iters): pert = self.give_simple_perturbation(attack_method, test_image_pert) test_image_pert = self.apply_perturb(test_image_pert, pert, alpha, epsilon) criterion", "else: raise RuntimeError(\"Input image shape invalid!\") def give_simple_perturbation(self, attack_method, in_image): counterfactuals = self.create_counterfactuals(in_image)", "{} # '''.format(epsilon, # np.max(np.abs(self.test_image - self.perturbed_image)))) predicted_scores = self.run_model(self.sess, self.NET.output, self.perturbed_image, self.NET)", "self.original_mass_center, self.target_map_ph: self.target_map }) else: return sess.run(operation, feed_dict={ NET.input: feed, NET.reference_image: self.reference_image, 
NET.label_ph:", "of steps in Integrated Gradients Algorithm reference_image: Mean subtracted reference image of Integrated", "self.original = self.test_image.copy() if attack_method == 'target': self.use_target = True else: self.use_target =", "== \"intersection\": top2 = self.run_model(self.sess, self.NET.top_idx, counterfactuals, self.NET) criterion = float(len(np.intersect1d(self.topK, top2))) /", "the image NET: Original neural network. It's assumed that NET.saliency is the saliency", "np.argmax(predicted_scores, 1) != original_label: print(\"Network's Prediction is Already Incorrect!\") return True else: self.original_confidence", "import _pickle as pkl import matplotlib.pyplot as plt from pylab import rcParams import", "np.max( np.abs(self.test_image - self.perturbed_image)) else: pass if criterion == 1.: return None predicted_scores", "float(len(np.intersect1d(self.topK, top2))) / self.k_top elif measure == \"correlation\": saliency2_flatten = self.run_model(self.sess, self.NET.saliency_flatten, counterfactuals,", "min_criterion = 1. for counter in range(iters): # if counter % int(iters /", "20:num * 20 + 20] = dic_temp[\"X\"] y[num * 20:num * 20 +", "* tf.reduce_mean( output_dis) self.debug = target_loss self.target_direction = -tf.gradients(target_loss, NET.input)[0] def create_counterfactuals(self, in_image,", "else: proj_ratio = 1 out_image = self.test_image + d * proj_ratio out_image =", "= np.argsort(np.reshape(self.saliency1, [w * h]))[-self.k_top:] self.elements1 = np.zeros(w * h) self.elements1[elem1] = 1", "test_image.shape self.test_image = test_image self.original_label = original_label assert self.check_prediction(self.sess, original_label, test_image, self.NET) ==", "pixel_max if len(test_image.shape) != 3: raise ValueError(\"Invalid Test Image Dimensions\") if NET.input.get_shape()[-3]!=test_image.shape[-3] or", "of allowed maximum $ell_infty$ of perturbations, eg:[2,4] iters: number of maximum allowed attack", "graphs test_image: Mean subtracted test image original_label: True label of the image NET:", "20:num * 20 + 20] = dic_temp[\"y\"] labels = dic_temp[\"labels\"] return X, y.astype(int),", "\"intersection\") Returns: intersection: The portion of the top K salient pixels in the", "if min_criterion == 1.: # print( # \"The attack was not successfull for", "= test_image self.original_label = original_label self.mean_image = mean_image self.k_top = k_top w, h,", "+ 20] = dic_temp[\"y\"] labels = dic_temp[\"labels\"] return X, y.astype(int), labels class SimpleGradientAttack(object):", "}) else: return sess.run(operation, feed_dict={ NET.input: [feed], NET.label_ph: self.original_label, NET.reference_image: self.reference_image, self.topk_ph: self.original_topk,", "criterion = scipy.stats.spearmanr(self.saliency1_flatten, saliency2_flatten)[0] elif measure == \"mass_center\": center2 = self.run_model(self.sess, self.NET.mass_center, test_image_pert,", "* 20:num * 20 + 20] = dic_temp[\"X\"] y[num * 20:num * 20", "elif attack_method == \"topK\": perturbation = self.run_model(self.sess, self.topK_direction, in_image, self.NET2) perturbation = np.reshape(perturbation,", "counterfactuals = self.create_counterfactuals(test_image) self.saliency1, self.topK = self.run_model( self.sess, [self.NET.saliency, self.NET.top_idx], counterfactuals, self.NET) self.saliency1_flatten", "with the provided test image!\" ) self.NET = NET self.NET2 = NET2 self.test_image", "pylab import rcParams import scipy import scipy.stats as stats from tensorflow.python.ops import gen_nn_ops", 
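# --- Illustrative sketch (assumption, not part of the original file) ---
# The perturbation update performed by apply_perturb can be read as one
# signed-gradient step followed by a projection back onto the ell_infinity
# ball of radius `bound` around the clean image, then a clip to the valid
# (mean-subtracted) pixel range.  The names `project_linf_step`, `test_image`
# and `mean_image` are placeholders for the corresponding attributes.
import numpy as np

def project_linf_step(in_image, pert, test_image, mean_image,
                      alpha=1.0, bound=8.0 / 255, pixel_max=255.0):
    d = in_image + alpha * np.sign(pert) - test_image         # displacement so far
    d_norm = np.linalg.norm(d.flatten(), ord=np.inf)
    ratio = bound / d_norm if d_norm > bound else 1.0          # shrink if outside the ball
    out = test_image + d * ratio
    return np.clip(out, -mean_image, pixel_max - mean_image)   # keep pixels valid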
"num_class], name='original_output_ph') # only for the manipulation attack self.create_attack_ops(self.NET2, test_image.shape[-3], test_image.shape[-2]) self.update_new_image(test_image, original_label)", "= 1. for counter in range(iters): # if counter % int(iters / 5)", "self.NET.top_idx], counterfactuals, self.NET) self.saliency1_flatten = np.reshape( self.saliency1, [test_image.shape[-3] * test_image.shape[-2]]) elem1 = np.argsort(np.reshape(self.saliency1,", "and perturbed images confidence: The prediction confidence of the perturbed image \"\"\" w,", "confidence = np.max(predicted_scores) self.saliency2, self.top2, self.mass_center2= self.run_model\\ (self.sess, [self.NET.saliency, self.NET.top_idx, self.NET.mass_center], self.perturbed_image, self.NET)", "Image Dimensions\") if sum([ NET.input.get_shape()[-i] != test_image.shape[-i] for i in [1, 2, 3]", "return sess.run(operation, feed_dict={ NET.input: [feed], NET.label_ph: self.original_label, self.topk_ph: self.original_topk, self.mass_center_ph: self.original_mass_center }) else:", "out_image = self.test_image + np.clip( # in_image + alpha * np.sign(pert) - self.test_image,", "Session containing model(and surrogate model's) graphs test_image: Mean subtracted test image original_label: True", "}) else: return sess.run(operation, feed_dict={ NET.input: feed, NET.label_ph: self.original_label, self.topk_ph: self.original_topk, self.mass_center_ph: self.original_mass_center", "pixel_max: the maximum pixel value in the image. \"\"\" self.pixel_max = pixel_max if", "self.mass_center2= self.run_model\\ (self.sess, [self.NET.saliency, self.NET.top_idx, self.NET.mass_center], counterfactuals, self.NET) correlation = scipy.stats.spearmanr( self.saliency1_flatten, np.reshape(self.saliency2,", "self.target_direction = -tf.gradients(target_loss, NET.input)[0] def create_counterfactuals(self, in_image, noise_ratio=0.1): counterfactuals = np.array([ in_image +", "self.original_label: if measure == \"intersection\": top2 = self.run_model(self.sess, self.NET.top_idx, test_image_pert, self.NET) criterion =", "}) else: raise RuntimeError(\"Input image shape invalid!\") def give_simple_perturbation(self, attack_method, in_image): counterfactuals =", "the maximum pixel value in the image. 
\"\"\" self.pixel_max = pixel_max if len(test_image.shape)", "and self.use_target: return sess.run(operation, feed_dict={ NET.input: [feed], NET.label_ph: self.original_label, self.topk_ph: self.original_topk, NET.reference_image: self.reference_image,", "surrogate model's) graphs test_image: Mean subtracted test image original_label: True label of the", "\"original_mass_center\"): if hasattr(self, \"use_target\") and self.use_target: return sess.run(operation, feed_dict={ NET.input: feed, NET.label_ph: self.original_label,", "else: self.original_confidence = np.max(predicted_scores) return False def update_new_image(self, test_image, original_label, target_map=None): w, h,", "!= test_image.shape[-i] for i in [1, 2, 3] ]): raise ValueError( \"Surrogate model's", "= test_image.shape self.test_image = test_image self.original_label = original_label assert self.check_prediction(self.sess, original_label, test_image, self.NET)", "self.original_mass_center }) else: return sess.run(operation, feed_dict={ NET.input: [feed], NET.label_ph: self.original_label, }) elif len(feed.shape)", "tf.reduce_mean( output_dis) self.debug = target_loss self.target_direction = -tf.gradients(target_loss, NET.input)[0] def run_model(self, sess, operation,", "self.run_model(self.sess, self.NET.output, test_image_pert, self.NET) if np.argmax(prob, 1) == self.original_label: if measure == \"intersection\":", "last_image = None for counter in range(iters): pert = self.give_simple_perturbation(attack_method, test_image_pert) test_image_pert =", "softplus function (necessary only when the activation function of the original function does", "self.target_map_ph: self.target_map }) else: return sess.run(operation, feed_dict={ NET.input: [feed], NET.label_ph: self.original_label, self.topk_ph: self.original_topk,", "= 1 self.original_topk = self.elements1 self.mass_center1 = self.run_model(self.sess, self.NET.mass_center, counterfactuals, self.NET).astype(int) self.original_mass_center =", "else: return sess.run(operation, feed_dict={ NET.input: feed, NET.label_ph: self.original_label, NET.reference_image: self.reference_image, self.topk_ph: self.original_topk, self.mass_center_ph:", "map tensor and NET2.saliency_flatten is its flatten version. k_top: the topK parameter of", "self.use_target: return sess.run(operation, feed_dict={ NET.input: feed, NET.label_ph: self.original_label, self.topk_ph: self.original_topk, self.mass_center_ph: self.original_mass_center, self.beta_0_ph:", "self.pixel_max - self.mean_image) return out_image def check_measure(self, test_image_pert, measure): prob = self.run_model(self.sess, self.NET.output,", "= test_image self.original_label = original_label self.mean_image = mean_image self.k_top = k_top self.num_steps =", "[w * h])) return intersection, correlation, center_dislocation, confidence, perturb_size, cos_distance class UniGradientsAttack(SmoothGradientsAttack): def", "= self.give_simple_perturbation(attack_method, test_image_pert) # print(pert.sum()) test_image_pert = self.apply_perturb(test_image_pert, pert, alpha, epsilon) criterion =", "pert, alpha, epsilon) criterion = self.check_measure(test_image_pert, measure) if criterion < min_criterion: # print(\"attack\")", "self.NET2) perturbation = np.reshape(perturbation, [self.num_steps, w, h, c]) elif attack_method == \"target\": if", "the saliency map tensor and NET.saliency_flatten is its flatten version. 
NET2: Surrogate neural", "= scipy.stats.spearmanr(self.saliency1_flatten, saliency2_flatten)[0] elif measure == \"mass_center\": center2 = self.run_model(self.sess, self.NET.mass_center, counterfactuals, self.NET).astype(int)", "if NET.input.get_shape()[-3]!=test_image.shape[-3] or NET.input.get_shape()[-2]!=test_image.shape[-2] or\\ NET.input.get_shape()[-1]!=test_image.shape[-1]: raise ValueError( \"Model's input dimensions is not", "or \"random\" epsilon: set of allowed maximum $ell_infty$ of perturbations, eg:[2,4] iters: number", "counterfactuals = np.array([(float(i+1)/self.num_steps) * ref_subtracted + self.reference_image\\ for i in range(self.num_steps)]) return np.array(counterfactuals)", "test_image, self.NET) == False if target_map is not None: self.target_map = target_map self.original_output", "\"mass_center\" or \"intersection\") Returns: intersection: The portion of the top K salient pixels", "target=None): \"\"\" Args: attack_method: One of \"mass_center\", \"topK\" or \"random\" epsilon: Allowed maximum", "value in the input image \"\"\" self.pixel_max = pixel_max if len(test_image.shape) != 3:", "tf.keras.losses.MSE(self.original_output_ph, NET.output) target_loss = tf.reduce_mean( target_dis) * self.beta_0_ph + self.beta_1_ph * tf.reduce_mean( output_dis)", "np.reshape(self.saliency2, [w * h])) return intersection, correlation, center_dislocation, confidence, perturb_size, cos_distance class IntegratedGradientsAttack(object):", "self.topk_ph)) self.debug = topK_loss NET.topK_direction = -tf.gradients(topK_loss, NET.input)[0] mass_center_loss = -tf.reduce_sum( (NET.mass_center -", "= False self.beta_0 = beta_0 self.beta_1 = beta_1 min_criterion = 1. last_image =", "attack (refer to the original paper) pixel_max: the maximum pixel value in the", "parameter of the attack (refer to the original paper) num_steps: Number of steps", "w, h, c = self.mean_image.shape self.topk_ph = tf.placeholder(tf.float32, shape=[w * h], name='topk_ph') self.mass_center_ph", "target_loss = tf.reduce_mean( target_dis) * self.beta_0_ph + self.beta_1_ph * tf.reduce_mean( output_dis) self.debug =", "prob = self.run_model(self.sess, self.NET.output, test_image_pert, self.NET) if np.argmax(prob, 1) == self.original_label: counterfactuals =", "tf.placeholder( tf.float32, shape=[None, num_class], name='original_output_ph') # only for the manipulation attack self.create_attack_ops(self.NET2, test_image.shape[-3],", "= target_map self.create_extra_ops(NET, test_image.shape[-3], test_image.shape[-2], k_top) if NET2 is None: NET2 = NET", "criterion self.perturbed_image = test_image_pert.copy() perturb_size = np.max( np.abs(self.test_image - self.perturbed_image)) else: # print(\"labels", "the provided test image!\" ) self.NET = NET self.NET2 = NET2 self.test_image =", "has no meaning.\"\"\" predicted_scores = sess.run( NET.output, feed_dict={NET.input: image if len(image.shape) == 4", "NET.input: feed, NET.label_ph: self.original_label, self.topk_ph: self.original_topk, self.mass_center_ph: self.original_mass_center }) else: return sess.run(operation, feed_dict={", "\"\"\" Args: mean_image: The mean image of the data set(The assumption is that", "!= original_label: print(\"Network's Prediction is Already Incorrect!\") print(\"Pred: \", np.argmax(predicted_scores, 1)) print(\"Label: \",", "self.NET2) print(\"MSE: \", debug) perturbation = np.reshape(perturbation, [w, h, c]) return np.sign(perturbation) def", "in the image. 
\"\"\" self.pixel_max = pixel_max if len(test_image.shape) != 3: raise ValueError(\"Invalid", "beta_1 w, h, c = self.test_image.shape test_image_pert = self.test_image.copy() min_criterion = 1. for", "w, h, c]) elif attack_method == \"mass_center\": perturbation = self.run_model(self.sess, self.NET2.mass_center_direction, counterfactuals, self.NET2)", "return criterion else: return 1 def iterative_attack(self, attack_method, epsilon, iters=100, alpha=1, beta_0=1e11, beta_1=1e6,", "the attack (refer to the original paper) pixel_max: the maximum pixel value in", "True MEAN_IMAGE = np.zeros((1, 227, 227, 3)).astype(np.float32) MEAN_IMAGE[:, :, :, 0] = 103.939", "num_class], name='original_output_ph') # only for the manipulation attack self.beta_0_ph = tf.placeholder(tf.float32, name='beta_0') self.beta_1_ph", "k_top) if NET2.input.get_shape()[-3]!=test_image.shape[-3] or NET2.input.get_shape()[-2]!=test_image.shape[-2] or\\ NET2.input.get_shape()[-1]!=test_image.shape[-1]: raise ValueError( \"Surrogate model's input dimensions", "else: return sess.run(operation, feed_dict={ NET.input: [feed], NET.reference_image: self.reference_image, NET.label_ph: self.original_label, self.target_map_ph: self.target_map })", "else: raise ValueError(\"Invalid measure!\") return criterion else: return 1. def iterative_attack(self, attack_method, epsilon,", "= self.create_counterfactuals(test_image_pert) if measure == \"intersection\": top2 = self.run_model(self.sess, self.NET.top_idx, counterfactuals, self.NET) criterion", "= self.mean_image.shape self.topk_ph = tf.placeholder(tf.float32, shape=[w * h], name='topk_ph') self.mass_center_ph = tf.placeholder(tf.float32, shape=[2],", "!= test_image.shape[-i] for i in [1, 2, 3] ]): raise ValueError( \"Model's input", "dic_temp[\"X\"] y[num * 20:num * 20 + 20] = dic_temp[\"y\"] labels = dic_temp[\"labels\"]", "self.original_output, self.target_map_ph: self.target_map }) else: return sess.run(operation, feed_dict={ NET.input: feed, NET.label_ph: self.original_label, NET.reference_image:", "+ np.random.uniform(-1, 1, size=in_image.shape) * self.radii for _ in range(self.num_steps) ]) return np.array(counterfactuals)", "test_image.shape[-3], test_image.shape[-2], k_top) if sum([ NET2.input.get_shape()[-i] != test_image.shape[-i] for i in [1, 2,", "perturb_size = np.max( np.abs(self.test_image - self.perturbed_image)) else: # print(\"labels is changed\") pass if", "self.test_image = test_image self.original_label = original_label self.mean_image = mean_image self.k_top = k_top self.num_steps", "images confidence: The prediction confidence of the perturbed image \"\"\" self.beta_0 = beta_0", "the saliency map tensor and NET2.saliency_flatten is its flatten version. k_top: the topK", "if self.target_map is not None: target_dis = tf.keras.losses.MSE(self.target_map_ph, NET.saliency) output_dis = tf.keras.losses.MSE(self.original_output_ph, NET.output)", "original_label, NET, NET2=NET2, k_top=1000, num_steps=num_steps, reference_image=reference_image, target_map=target_map, pixel_max=255.) def create_counterfactuals(self, in_image): counterfactuals =", "map tensor and NET.saliency_flatten is its flatten version. 
NET2: Surrogate neural network with", "= np.zeros_like(in_image) # out_image = self.test_image + np.clip( # in_image + alpha *", "salient pixels in the original picture that are in the top K salient", "import scipy.stats as stats from tensorflow.python.ops import gen_nn_ops config_gpu = tf.ConfigProto() config_gpu.gpu_options.allow_growth =", "for the manipulation attack self.beta_0_ph = tf.placeholder(tf.float32, name='beta_0') self.beta_1_ph = tf.placeholder(tf.float32, name='beta_1') self.create_attack_ops(NET2,", "else: return 1. def iterative_attack(self, attack_method, epsilon, iters=100, alpha=1, beta_0=1e11, beta_1=1e6, measure=\"intersection\"): \"\"\"", "= self.elements1 self.mass_center1 = self.run_model(self.sess, self.NET.mass_center, self.test_image, self.NET).astype(int) self.original_mass_center = self.mass_center1 def check_prediction(self,", "self.reference_image, NET.label_ph: self.original_label, self.target_map_ph: self.target_map }) elif len(feed.shape) == 4: if hasattr(self, \"original_topk\")", "[w * h]))[-self.k_top:] self.elements1 = np.zeros(w * h) self.elements1[elem1] = 1 self.original_topk =", "self.original_output = self.NET.predict(test_image[None, :]) _, num_class = self.original_output.shape self.original_output_ph = tf.placeholder( tf.float32, shape=[None,", "saliency map tensor and NET2.saliency_flatten is its flatten version. k_top: the topK parameter", "np.sign(pert) - self.test_image d_norm = np.linalg.norm(d.flatten(), ord=ord) if d_norm > bound: proj_ratio =", "the perturbed image devided correlation: The rank correlation between saliency maps of original", "self.run_model(self.sess, self.NET2.mass_center_direction, counterfactuals, self.NET2) perturbation = np.reshape(perturbation, [self.num_steps, w, h, c]) elif attack_method", "self.original_output = self.NET2.predict(test_image[None, :]) counterfactuals = self.create_counterfactuals(test_image) self.saliency1, self.topK = self.run_model( self.sess, [self.NET.saliency,", "for num in range(4): with open( \"./ImagenetValidationSamples/imagenet_sample_{}.pkl\".format( num), \"rb\") as inputs: dic_temp =", "[self.num_steps, w, h, c]) perturbation_summed = np.mean(perturbation, 0) return np.sign(perturbation_summed) def apply_perturb(self, in_image,", "}) elif len(feed.shape) == 4: if hasattr(self, \"original_topk\") and hasattr( self, \"original_mass_center\"): if", "self.original_topk, self.mass_center_ph: self.original_mass_center, self.target_map_ph: self.target_map }) else: return sess.run(operation, feed_dict={ NET.input: feed, NET.label_ph:", "self.NET.output, test_image_pert, self.NET) if np.argmax(prob, 1) == self.original_label: counterfactuals = self.create_counterfactuals(test_image_pert) if measure", "1) == self.original_label: counterfactuals = self.create_counterfactuals(test_image_pert) if measure == \"intersection\": top2 = self.run_model(self.sess,", "RuntimeError(\"Input image shape invalid!\") def give_simple_perturbation(self, attack_method, in_image): w, h, c = self.test_image.shape", "= -tf.gradients(target_loss, NET.input)[0] def run_model(self, sess, operation, feed, NET): if len(feed.shape) == 3:", "= np.reshape(perturbation, [self.num_steps, w, h, c]) elif attack_method == \"target\": self.use_target = True", "1.: return None predicted_scores = self.run_model(self.sess, self.NET.output, self.perturbed_image, self.NET) confidence = np.max(predicted_scores) counterfactuals", "np.clip(out_image, -self.mean_image, self.pixel_max - self.mean_image) return out_image def 
check_measure(self, test_image_pert, measure): prob =", "print(\"labels is changed\") pass if min_criterion == 1.: # print( # \"The attack", "perturbation = np.reshape(perturbation, [self.num_steps, w, h, c]) elif attack_method == \"mass_center\": perturbation =", "}) else: return sess.run(operation, feed_dict={ NET.input: feed, NET.label_ph: self.original_label, self.topk_ph: self.original_topk, self.mass_center_ph: self.original_mass_center,", "self.reference_image\\ for i in range(self.num_steps)]) return np.array(counterfactuals) def run_model(self, sess, operation, feed, NET):", "[self.num_steps, w, h, c]) elif attack_method == \"target\": if self.target_map is None: raise", "K salient pixels of the perturbed image devided correlation: The rank correlation between", "beta_1=1e6, measure=\"intersection\"): \"\"\" Args: attack_method: One of \"mass_center\", \"topK\" or \"random\" epsilon: set", "None: self.target_map = target_map self.original_output = self.NET2.predict(test_image[None, :]) counterfactuals = self.create_counterfactuals(test_image) self.saliency1, self.topK", "perturb_size, cos_distance class SmoothGradientsAttack(object): def __init__(self, sess, mean_image, test_image, original_label, NET, NET2=None, k_top=1000,", "raise ValueError( \"Model's input dimensions is not Compatible with the provided test image!\"", "Allowed maximum $ell_infty$ of perturbations, eg:8 iters: number of maximum allowed attack iterations", "ValueError(\"No target region determined!\") else: perturbation = self.run_model(self.sess, self.target_direction, counterfactuals, self.NET2) perturbation =", "-bound, bound) d = in_image + alpha * pert - self.test_image d_norm =", "= tf.placeholder(tf.float32, name='beta_0') self.beta_1_ph = tf.placeholder(tf.float32, name='beta_1') self.original_output = self.NET.predict(test_image[None, :]) _, num_class", "maximum allowed perturbation size equal to {}, the resulting perturbation size was equal", "else: return sess.run(operation, feed_dict={ NET.input: [feed], NET.label_ph: self.original_label, }) elif len(feed.shape) == 4:", "= self.run_model(self.sess, self.topK_direction, in_image, self.NET2) perturbation = np.reshape(perturbation, [w, h, c]) elif attack_method", "MEAN_IMAGE[:, :, :, 1] = 116.779 MEAN_IMAGE[:, :, :, 2] = 123.68 EPSILON", "-tf.reduce_sum( (NET.mass_center - self.mass_center_ph)**2) self.mass_center_direction = -tf.gradients(mass_center_loss, NET.input)[0] if self.target_map is not None:", "_, num_class = self.original_output.shape self.original_output_ph = tf.placeholder( tf.float32, shape=[None, num_class], name='original_output_ph') # only", "h], name='target_map_ph') self.beta_0_ph = tf.placeholder(tf.float32, name='beta_0') self.beta_1_ph = tf.placeholder(tf.float32, name='beta_1') self.original_output = self.NET.predict(test_image[None,", "NET.reference_image: self.reference_image, self.topk_ph: self.original_topk, self.mass_center_ph: self.original_mass_center, self.target_map_ph: self.target_map }) else: return sess.run(operation, feed_dict={", "tf.reduce_sum((NET.saliency_flatten * self.topk_ph)) self.topK_direction = -tf.gradients(topK_loss, NET.input)[0] mass_center_loss = -tf.reduce_sum( (NET.mass_center - self.mass_center_ph)**2)", "= True else: self.use_target = False self.beta_0 = beta_0 self.beta_1 = beta_1 min_criterion", "= 255 * np.ones_like(MEAN_IMAGE).astype(np.float32) - MEAN_IMAGE def dataReader(): X = np.zeros((100, 227, 227,", "the activation function of the original function does not have second order 
gradients,", "self.target_map }) else: return sess.run(operation, feed_dict={ NET.input: feed, NET.reference_image: self.reference_image, NET.label_ph: self.original_label, })", "mass_center_loss = -tf.reduce_sum( (NET.mass_center - self.mass_center_ph)**2) self.mass_center_direction = -tf.gradients(mass_center_loss, NET.input)[0] if self.target_map is", "self.num_steps = num_steps self.reference_image = np.zeros_like( test_image) if reference_image is None else reference_image", "self.run_model(self.sess, self.NET.mass_center, self.test_image, self.NET).astype(int) self.original_mass_center = self.mass_center1 def check_prediction(self, sess, original_label, image, NET):", "topK_loss NET.topK_direction = -tf.gradients(topK_loss, NET.input)[0] mass_center_loss = -tf.reduce_sum( (NET.mass_center - self.mass_center_ph)**2) NET.mass_center_direction =", "return sess.run(operation, feed_dict={ NET.input: [feed], NET.label_ph: self.original_label, self.topk_ph: self.original_topk, NET.reference_image: self.reference_image, self.mass_center_ph: self.original_mass_center,", "if hasattr(self, \"use_target\") and self.use_target: return sess.run(operation, feed_dict={ NET.input: feed, NET.label_ph: self.original_label, NET.reference_image:", "pert, alpha, epsilon) criterion = self.check_measure(test_image_pert, measure) if criterion < min_criterion: min_criterion =", "{}\" # .format(epsilon)) # return 1., 1., self.original_confidence, 0. return None # print(", "epsilon, iters=100, alpha=1, beta_0=1e11, beta_1=1e6, measure=\"intersection\"): \"\"\" Args: attack_method: One of \"mass_center\", \"topK\"", "self.NET).astype(int) self.original_mass_center = self.mass_center1 def check_prediction(self, sess, original_label, image, NET): \"\"\" If the", "self.run_model(self.sess, self.target_direction, in_image, self.NET2) debug = self.run_model(self.sess, self.debug, in_image, self.NET2) print(\"MSE: \", debug)", "}) else: return sess.run(operation, feed_dict={ NET.input: [feed], NET.label_ph: self.original_label, self.topk_ph: self.original_topk, self.mass_center_ph: self.original_mass_center,", "top_val, NET.top_idx = tf.nn.top_k(NET.saliency_flatten, k_top) y_mesh, x_mesh = np.meshgrid(np.arange(h), np.arange(w)) NET.mass_center = tf.stack([", "= self.create_counterfactuals(self.perturbed_image) self.saliency2, self.top2, self.mass_center2= self.run_model\\ (self.sess, [self.NET.saliency, self.NET.top_idx, self.NET.mass_center], counterfactuals, self.NET) correlation", "= self.original_output.shape self.original_output_ph = tf.placeholder( tf.float32, shape=[None, num_class], name='original_output_ph') # only for the", "return 1. 
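# --- Illustrative sketch (assumption, not part of the original file) ---
# The "intersection", "correlation" and "mass_center" success criteria used by
# check_measure can be reproduced with plain NumPy/SciPy on two flattened
# saliency maps.  `k_top` mirrors the class attribute; the function name
# `saliency_change_measures` is hypothetical.
import numpy as np
import scipy.stats
import scipy.spatial.distance

def saliency_change_measures(sal1, sal2, center1, center2, k_top=1000):
    top1 = np.argsort(sal1)[-k_top:]
    top2 = np.argsort(sal2)[-k_top:]
    intersection = len(np.intersect1d(top1, top2)) / float(k_top)
    correlation = scipy.stats.spearmanr(sal1, sal2)[0]
    center_dislocation = np.linalg.norm(np.asarray(center1) - np.asarray(center2))
    cos_distance = scipy.spatial.distance.cosine(sal1, sal2)
    return intersection, correlation, center_dislocation, cos_distance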
def iterative_attack(self, attack_method, epsilon, iters=100, alpha=1, beta_0=1e11, beta_1=1e6, measure=\"intersection\"): \"\"\" Args:", "* ref_subtracted + self.reference_image\\ for i in range(self.num_steps)]) return np.array(counterfactuals) def run_model(self, sess,", "NET.input)[0] if self.target_map is not None: target_dis = tf.keras.losses.MSE(self.target_map_ph, NET.saliency) output_dis = tf.keras.losses.MSE(self.original_output_ph,", "image devided correlation: The rank correlation between saliency maps of original and perturbed", "test_image_pert, self.NET) criterion = float(len(np.intersect1d(self.topK, top2))) / self.k_top elif measure == \"correlation\": saliency2_flatten", "* np.sign(pert) - self.test_image, -bound, bound) d = in_image + alpha * pert", "prob = self.run_model(self.sess, self.NET.output, test_image_pert, self.NET) if np.argmax(prob, 1) == self.original_label: if measure", "test_image_pert) # print(pert.sum()) test_image_pert = self.apply_perturb(test_image_pert, pert, alpha, epsilon) criterion = self.check_measure(test_image_pert, measure)", "1., self.original_confidence, 0. return None # print( # '''For maximum allowed perturbation size", "+ alpha * np.sign(pert) - self.test_image, -bound, bound) d = in_image + alpha", "the manipulation attack self.create_attack_ops(self.NET2, test_image.shape[-3], test_image.shape[-2]) self.update_new_image(test_image, original_label) def update_new_image(self, test_image, original_label, target_map=None):", "paper) num_steps: Number of steps in Integrated Gradients Algorithm reference_image: not used pixel_max:", "that the images are mean subtracted) sess: Session containing model(and surrogate model's) graphs", "k_top: the topK parameter of the attack (refer to the original paper) num_steps:", "\"mass_center\" or \"intersection\") beta_0: parameter for manipulate (target) attack beta_1: parameter for manipulate", "np.argmax(prob, 1) == self.original_label: counterfactuals = self.create_counterfactuals(test_image_pert) if measure == \"intersection\": top2 =", "self.elements1 = np.zeros(w * h) self.elements1[elem1] = 1 self.original_topk = self.elements1 self.mass_center1 =", "pixel_max=255.): self.radii = radii / (255. 
/ pixel_max) super(UniGradientsAttack, self).__init__(sess, mean_image, test_image, original_label,", "target region determined!\") else: perturbation = self.run_model(self.sess, self.target_direction, in_image, self.NET2) debug = self.run_model(self.sess,", "= -tf.gradients(topK_loss, NET.input)[0] mass_center_loss = -tf.reduce_sum( (NET.mass_center - self.mass_center_ph)**2) self.mass_center_direction = -tf.gradients(mass_center_loss, NET.input)[0]", "h]))[-self.k_top:] self.elements1 = np.zeros(w * h) self.elements1[elem1] = 1 self.original_topk = self.elements1 self.mass_center1", "self.run_model( self.sess, [self.NET.saliency, self.NET.top_idx], self.test_image, self.NET) self.saliency1_flatten = np.reshape( self.saliency1, [test_image.shape[-3] * test_image.shape[-2]])", "criterion = float(len(np.intersect1d(self.topK, top2))) / self.k_top elif measure == \"correlation\": saliency2_flatten = self.run_model(self.sess,", "set of allowed maximum $ell_infty$ of perturbations, eg:[2,4] iters: number of maximum allowed", "[self.num_steps, w, h, c]) perturbation_summed = np.sum(np.array([float(i+1)/self.num_steps*perturbation[i]\\ for i in range(self.num_steps)]),0) return np.sign(perturbation_summed)", "perturbation_summed = np.mean(perturbation, 0) return np.sign(perturbation_summed) def apply_perturb(self, in_image, pert, alpha, bound=8 /", "reference_image: not used pixel_max: maximum pixel value in the input image \"\"\" self.pixel_max", "pkl.load(inputs) X[num * 20:num * 20 + 20] = dic_temp[\"X\"] y[num * 20:num", "self.original_output, self.target_map_ph: self.target_map }) else: return sess.run(operation, feed_dict={ NET.input: feed, NET.label_ph: self.original_label, self.topk_ph:", "- center2) elif measure == \"cosine\": saliency2_flatten = self.run_model(self.sess, self.NET.saliency_flatten, test_image_pert, self.NET) criterion", "255, ord=np.inf): if self.mean_image is None: self.mean_image = np.zeros_like(in_image) # out_image = self.test_image", "The mean image of the data set(The assumption is that the images are", "feed_dict={ NET.input: [feed], NET.label_ph: self.original_label, self.target_map_ph: self.target_map }) elif len(feed.shape) == 4: if", "self.mass_center_ph)**2) self.mass_center_direction = -tf.gradients(mass_center_loss, NET.input)[0] if self.target_map is not None: target_dis = tf.keras.losses.MSE(self.target_map_ph,", "c]) perturbation_summed = np.mean(perturbation, 0) return np.sign(perturbation_summed) def apply_perturb(self, in_image, pert, alpha, bound=8", "\", original_label) return True else: self.original_confidence = np.max(predicted_scores) return False def create_extra_ops(self, NET,", "\", np.argmax(predicted_scores, 1)) print(\"Label: \", original_label) return True else: self.original_confidence = np.max(predicted_scores) return", "tf.placeholder(tf.float32, shape=[w * h], name='topk_ph') self.mass_center_ph = tf.placeholder(tf.float32, shape=[2], name='mass_center_ph') self.target_map_ph = tf.placeholder(tf.float32,", "return True else: self.original_confidence = np.max(predicted_scores) return False def create_extra_ops(self, NET, w, h,", "image!\" ) self.NET = NET self.NET2 = NET2 self.test_image = test_image self.original_label =", "/ (w * h) ]) def create_attack_ops(self, NET, w, h): topK_loss = tf.reduce_sum((NET.saliency_flatten", "test_image, original_label, NET, NET2=None, k_top=1000, num_steps=100, radii=4, reference_image=None, target_map=None, pixel_max=255.): self.radii = radii", "self.NET) confidence = np.max(predicted_scores) 
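# --- Illustrative sketch (assumption, not part of the original file) ---
# UniGradientsAttack averages gradients over copies of the input perturbed
# with uniform noise of half-width `radii` (rescaled from the 0-255 range to
# the network's pixel range), instead of the Gaussian noise used by
# SmoothGradientsAttack.  The helper name is hypothetical.
import numpy as np

def uniform_noise_samples(in_image, radii=4.0, pixel_max=255.0, num_steps=100):
    scaled_radii = radii / (255.0 / pixel_max)
    return np.array([in_image
                     + np.random.uniform(-1.0, 1.0, size=in_image.shape) * scaled_radii
                     for _ in range(num_steps)])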
self.saliency2, self.top2, self.mass_center2= self.run_model\\ (self.sess, [self.NET.saliency, self.NET.top_idx, self.NET.mass_center], self.perturbed_image,", "perturbations, eg:[2,4] iters: number of maximum allowed attack iterations alpha: perturbation size in", "!= original_label: print(\"Network's Prediction is Already Incorrect!\") return True else: self.original_confidence = np.max(predicted_scores)", "of the data set(The assumption is that the images are mean subtracted) sess:", "not None: self.target_map = target_map self.original_output = self.NET2.predict(test_image[None, :]) counterfactuals = self.create_counterfactuals(test_image) self.saliency1,", "in_image + np.random.normal(scale=0.1 * (in_image.max() - in_image.min()), size=in_image.shape) for _ in range(self.num_steps) ])", "activation function of the original function does not have second order gradients, ex:", "function (necessary only when the activation function of the original function does not", "criterion == 1.: return None predicted_scores = self.run_model(self.sess, self.NET.output, self.perturbed_image, self.NET) confidence =", "name='mass_center_ph') self.target_map_ph = tf.placeholder(tf.float32, shape=[w, h], name='target_map_ph') self.beta_0_ph = tf.placeholder(tf.float32, name='beta_0') self.beta_1_ph =", "with the provided test image!\" ) if self.check_prediction(sess, original_label, test_image, NET): return self.sess", "measure: measure for success of the attack (one of \"correlation\", \"mass_center\" or \"intersection\")", "= np.reshape(perturbation, [w, h, c]) elif attack_method == \"mass_center\": perturbation = self.run_model(self.sess, self.mass_center_direction,", "np.zeros((100, 227, 227, 3)) y = np.zeros(100) for num in range(4): with open(", "self.use_target = False self.beta_0 = beta_0 self.beta_1 = beta_1 min_criterion = 1. 
last_image", "or\\ NET.input.get_shape()[-1]!=test_image.shape[-1]: raise ValueError( \"Model's input dimensions is not Compatible with the provided", "sess.run(operation, feed_dict={ NET.input: feed, NET.label_ph: self.original_label, }) else: raise RuntimeError(\"Input image shape invalid!\")", "self.k_top elif measure == \"correlation\": saliency2_flatten = self.run_model(self.sess, self.NET.saliency_flatten, counterfactuals, self.NET) criterion =", "np.linalg.norm(self.mass_center1 - self.mass_center2.astype(int)) cos_distance = scipy.spatial.distance.cosine( self.saliency1_flatten, np.reshape(self.saliency2, [w * h])) return intersection,", "of the perturbed image \"\"\" w, h, c = self.test_image.shape test_image_pert = self.test_image.copy()", "The portion of the top K salient pixels in the original picture that", "= -np.linalg.norm(self.mass_center1 - center2) elif measure == \"cosine\": saliency2_flatten = self.run_model(self.sess, self.NET.saliency_flatten, test_image_pert,", "self.original_label, NET.reference_image: self.reference_image, self.topk_ph: self.original_topk, self.mass_center_ph: self.original_mass_center, self.beta_0_ph: self.beta_0, self.beta_1_ph: self.beta_1, self.original_output_ph: self.original_output,", "* tf.reduce_mean( output_dis) self.debug = target_loss self.target_direction = -tf.gradients(target_loss, NET.input)[0] def run_model(self, sess,", "the images are mean subtracted) sess: Session containing model(and surrogate model's) graphs test_image:", "NET2.input.get_shape()[-i] != test_image.shape[-i] for i in [1, 2, 3] ]): raise ValueError( \"Surrogate", "test_image.shape[-2], k_top) if NET2.input.get_shape()[-3]!=test_image.shape[-3] or NET2.input.get_shape()[-2]!=test_image.shape[-2] or\\ NET2.input.get_shape()[-1]!=test_image.shape[-1]: raise ValueError( \"Surrogate model's input", "= beta_1 min_criterion = 1. 
last_image = None for counter in range(iters): pert", "if counter % int(iters / 5) == 0: # print(\"Iteration : {}\".format(counter)) pert", "= NET2 self.test_image = test_image self.original_label = original_label self.mean_image = mean_image self.k_top =", "RuntimeError(\"Input image shape invalid!\") def give_simple_perturbation(self, attack_method, in_image): counterfactuals = self.create_counterfactuals(in_image) w, h,", "w, h, c = self.test_image.shape test_image_pert = self.test_image.copy() self.original = self.test_image.copy() if attack_method", ":, 1] = 116.779 MEAN_IMAGE[:, :, :, 2] = 123.68 EPSILON = 1e-12", "float(len(np.intersect1d(self.topK, self.top2))) / self.k_top center_dislocation = np.linalg.norm(self.mass_center1 - self.mass_center2.astype(int)) cos_distance = scipy.spatial.distance.cosine( self.saliency1_flatten,", "= tf.keras.losses.MSE(self.target_map_ph, NET.saliency) output_dis = tf.keras.losses.MSE(self.original_output_ph, NET.output) target_loss = tf.reduce_mean( target_dis) * self.beta_0_ph", "original paper) num_steps: Number of steps in Integrated Gradients Algorithm reference_image: Mean subtracted", "'''For maximum allowed perturbation size equal to {}, the resulting perturbation size was", "perturbed images confidence: The prediction confidence of the perturbed image \"\"\" self.beta_0 =", "give_simple_perturbation(self, attack_method, in_image): counterfactuals = self.create_counterfactuals(in_image) w, h, c = self.test_image.shape if attack_method", "= 116.779 MEAN_IMAGE[:, :, :, 2] = 123.68 EPSILON = 1e-12 MIN_INPUT =", "np.reshape(self.saliency2, [w * h]))[0] intersection = float(len(np.intersect1d(self.topK, self.top2))) / self.k_top center_dislocation = np.linalg.norm(self.mass_center1", "reference_image=reference_image, target_map=target_map, pixel_max=255.) 
def create_counterfactuals(self, in_image): counterfactuals = np.array([ in_image + np.random.uniform(-1, 1,", "dic_temp = pkl.load(inputs) X[num * 20:num * 20 + 20] = dic_temp[\"X\"] y[num", "return sess.run(operation, feed_dict={ NET.input: [feed], NET.label_ph: self.original_label, self.topk_ph: self.original_topk, self.mass_center_ph: self.original_mass_center, self.target_map_ph: self.target_map", "if NET2 is None: NET2 = NET else: self.create_extra_ops(NET2, test_image.shape[-3], test_image.shape[-2], k_top) if", "maps of original and perturbed image center_dislocation: The L2 distance between saliency map", "None predicted_scores = self.run_model(self.sess, self.NET.output, self.perturbed_image, self.NET) confidence = np.max(predicted_scores) counterfactuals = self.create_counterfactuals(self.perturbed_image)", "self.k_top center_dislocation = np.linalg.norm(self.mass_center1 - self.mass_center2.astype(int)) cos_distance = scipy.spatial.distance.cosine( self.saliency1_flatten, np.reshape(self.saliency2, [w *", "as pkl import matplotlib.pyplot as plt from pylab import rcParams import scipy import", "\"mass_center\": center2 = self.run_model(self.sess, self.NET.mass_center, counterfactuals, self.NET).astype(int) criterion = -np.linalg.norm(self.mass_center1 - center2) elif", "= np.zeros_like( test_image) if reference_image is None else reference_image w, h, c =", "c]) elif attack_method == \"mass_center\": perturbation = self.run_model(self.sess, self.mass_center_direction, in_image, self.NET2) perturbation =", "= k_top self.num_steps = num_steps self.reference_image = np.zeros_like( test_image) if reference_image is None", "dic_temp[\"y\"] labels = dic_temp[\"labels\"] return X, y.astype(int), labels class SimpleGradientAttack(object): def __init__(self, mean_image,", "[w, h, c]) return np.sign(perturbation) def apply_perturb(self, in_image, pert, alpha, bound=8 / 255,", "None: raise ValueError(\"No target region determined!\") else: perturbation = self.run_model(self.sess, self.target_direction, counterfactuals, self.NET2)", "self.target_map_ph: self.target_map }) elif len(feed.shape) == 4: if hasattr(self, \"original_topk\") and hasattr( self,", "= self.NET2.predict(test_image[None, :]) self.saliency1, self.topK = self.run_model( self.sess, [self.NET.saliency, self.NET.top_idx], self.test_image, self.NET) self.saliency1_flatten", "resulting perturbation size was equal to {} # '''.format(epsilon, # np.max(np.abs(self.test_image - self.perturbed_image))))", "orignal network but with activations replaced by softplus function (necessary only when the", "mean_image, test_image, original_label, NET, NET2=NET2, k_top=1000, num_steps=num_steps, reference_image=reference_image, target_map=target_map, pixel_max=255.) def create_counterfactuals(self, in_image):", "and perturbed images confidence: The prediction confidence of the perturbed image \"\"\" self.beta_0", "d = in_image + alpha * np.sign(pert) - self.test_image d_norm = np.linalg.norm(d.flatten(), ord=ord)", "= 123.68 EPSILON = 1e-12 MIN_INPUT = -MEAN_IMAGE MAX_INPUT = 255 * np.ones_like(MEAN_IMAGE).astype(np.float32)", "X[num * 20:num * 20 + 20] = dic_temp[\"X\"] y[num * 20:num *", "measure!\") return criterion else: return 1. def iterative_attack(self, attack_method, epsilon, iters=100, alpha=1, beta_0=1e11,", "Gradients Algorithm pixel_max: the maximum pixel value in the image. 
\"\"\" self.pixel_max =", "w, h, c]) elif attack_method == \"target\": if self.target_map is None: raise ValueError(\"No", "== 3: if hasattr(self, \"original_topk\") and hasattr( self, \"original_mass_center\"): if hasattr(self, \"use_target\") and", "(one of \"correlation\", \"mass_center\" or \"intersection\") beta_0: parameter for manipulate (target) attack beta_1:", "bound) d = in_image + alpha * pert - self.test_image d_norm = np.linalg.norm(d.flatten(),", "dic_temp[\"labels\"] return X, y.astype(int), labels class SimpleGradientAttack(object): def __init__(self, mean_image, sess, test_image, original_label,", "sess, mean_image, test_image, original_label, NET, NET2=None, k_top=1000, num_steps=100, reference_image=None, target_map=None, pixel_max=255.): \"\"\" Args:", "tensor and NET2.saliency_flatten is its flatten version. k_top: the topK parameter of the", "self.original_mass_center = self.mass_center1 def create_extra_ops(self, NET, w, h, k_top): top_val, NET.top_idx = tf.nn.top_k(NET.saliency_flatten,", "test_image_pert.copy() perturb_size = np.max( np.abs(self.test_image - self.perturbed_image)) else: pass if criterion == 1.:", "not used pixel_max: maximum pixel value in the input image \"\"\" self.pixel_max =", "perturbed image \"\"\" self.beta_0 = beta_0 self.beta_1 = beta_1 w, h, c =", "Algorithm pixel_max: the maximum pixel value in the image. \"\"\" self.pixel_max = pixel_max", "i in range(self.num_steps)]),0) return np.sign(perturbation_summed) def apply_perturb(self, in_image, pert, alpha, bound=8 / 255,", "- self.mass_center2.astype(int)) cos_distance = scipy.spatial.distance.cosine( self.saliency1_flatten, np.reshape(self.saliency2, [w * h])) return intersection, correlation,", "for counter in range(iters): # if counter % int(iters / 5) == 0:", "of the orignal network but with activations replaced by softplus function (necessary only", "= tf.nn.top_k(NET.saliency_flatten, k_top) y_mesh, x_mesh = np.meshgrid(np.arange(h), np.arange(w)) NET.mass_center = tf.stack([ tf.reduce_sum(NET.saliency *", "original_label assert self.check_prediction(self.sess, original_label, test_image, self.NET) == False if target_map is not None:", "(NET.mass_center - self.mass_center_ph)**2) NET.mass_center_direction = -tf.gradients(mass_center_loss, NET.input)[0] if self.target_map is not None: target_dis", "4 else [image]}) if np.argmax(predicted_scores, 1) != original_label: print(\"Network's Prediction is Already Incorrect!\")", "test_image_pert, self.NET).astype(int) criterion = -np.linalg.norm(self.mass_center1 - center2) elif measure == \"cosine\": saliency2_flatten =", "for success of the attack (one of \"correlation\", \"mass_center\" or \"intersection\") beta_0: parameter", "if criterion < min_criterion: # print(\"attack\") min_criterion = criterion self.perturbed_image = test_image_pert.copy() perturb_size", ":, 2] = 123.68 EPSILON = 1e-12 MIN_INPUT = -MEAN_IMAGE MAX_INPUT = 255", "confidence, perturb_size, cos_distance class SmoothGradientsAttack(object): def __init__(self, sess, mean_image, test_image, original_label, NET, NET2=None,", "target_map=target_map, pixel_max=255.) 
def create_counterfactuals(self, in_image): counterfactuals = np.array([ in_image + np.random.uniform(-1, 1, size=in_image.shape)", "np.reshape(perturbation, [w, h, c]) elif attack_method == \"target\": self.use_target = True if self.target_map", "saliency2_flatten)[0] elif measure == \"mass_center\": center2 = self.run_model(self.sess, self.NET.mass_center, counterfactuals, self.NET).astype(int) criterion =", "intersection: The portion of the top K salient pixels in the original picture", "ref_subtracted + self.reference_image\\ for i in range(self.num_steps)]) return np.array(counterfactuals) def run_model(self, sess, operation,", "parameter of the attack (refer to the original paper) pixel_max: the maximum pixel", "if len(test_image.shape) != 3: raise ValueError(\"Invalid Test Image Dimensions\") if sum([ NET.input.get_shape()[-i] !=", "1. perturb_size = 0. last_image = None for counter in range(iters): pert =" ]
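A minimal driver sketch for the attack classes above. The names sess, net, net2, test_image and label are stand-ins for a TensorFlow 1.x session, a wrapped model (and its softplus surrogate) exposing the input, output, saliency, saliency_flatten, top_idx and mass_center tensors used above, and a mean-subtracted test image with its true label; none of them are defined in the original file, and the epsilon value is only an example bound.

# Hypothetical usage sketch (assumes sess, net, net2, test_image, label exist).
import numpy as np

attack = SimpleGradientAttack(mean_image=np.zeros_like(test_image), sess=sess,
                              test_image=test_image, original_label=label,
                              NET=net, NET2=net2, k_top=1000)
result = attack.iterative_attack("mass_center", epsilon=8, iters=100, alpha=1,
                                 measure="intersection")
if result is not None:
    intersection, correlation, center_dislocation, confidence, perturb_size, cos_distance = result
    print("top-k intersection after attack: {:.3f}".format(intersection))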
[ "QDialog windows and currently isn't used. \"\"\" def __init__(self, main_window): super().__init__() self.main_window =", "class is responsible for building a small area below of the item menu", "for building a small area below of the item menu when some item", "has been replaced for QDialog windows and currently isn't used. \"\"\" def __init__(self,", "class PlotWidget(QWidget): \"\"\"MenuInfo Widget This class is responsible for building a small area", "\"\"\" def __init__(self, main_window): super().__init__() self.main_window = main_window self._create_widgets() self._add_widget_to_layout() def _create_widgets(self): self.plot_button", "self.wireframe = QCheckBox('Wireframe') self.deformation = QCheckBox('Deformation') self.animate = QCheckBox('Animate') self.plot_button.clicked.connect(self._plot_function) def _add_widget_to_layout(self): self.layout", "currently isn't used. \"\"\" def __init__(self, main_window): super().__init__() self.main_window = main_window self._create_widgets() self._add_widget_to_layout()", "QCheckBox('Deformation') self.animate = QCheckBox('Animate') self.plot_button.clicked.connect(self._plot_function) def _add_widget_to_layout(self): self.layout = QFormLayout() self.setLayout(self.layout) self.layout.addRow(self.wireframe) self.layout.addRow(self.deformation)", "= QCheckBox('Wireframe') self.deformation = QCheckBox('Deformation') self.animate = QCheckBox('Animate') self.plot_button.clicked.connect(self._plot_function) def _add_widget_to_layout(self): self.layout =", "small area below of the item menu when some item is clicked. This", "is responsible for building a small area below of the item menu when", "QLineEdit, QPushButton, QFormLayout, QMessageBox, QCheckBox class PlotWidget(QWidget): \"\"\"MenuInfo Widget This class is responsible", "replaced for QDialog windows and currently isn't used. \"\"\" def __init__(self, main_window): super().__init__()", "QCheckBox('Animate') self.plot_button.clicked.connect(self._plot_function) def _add_widget_to_layout(self): self.layout = QFormLayout() self.setLayout(self.layout) self.layout.addRow(self.wireframe) self.layout.addRow(self.deformation) self.layout.addRow(self.animate) self.layout.addRow(self.plot_button) def", "<reponame>open-pulse/OpenPulse from os.path import isfile from PyQt5.QtWidgets import QWidget, QLabel, QLineEdit, QPushButton, QFormLayout,", "from PyQt5.QtWidgets import QWidget, QLabel, QLineEdit, QPushButton, QFormLayout, QMessageBox, QCheckBox class PlotWidget(QWidget): \"\"\"MenuInfo", "some item is clicked. This has been replaced for QDialog windows and currently", "for QDialog windows and currently isn't used. \"\"\" def __init__(self, main_window): super().__init__() self.main_window", "os.path import isfile from PyQt5.QtWidgets import QWidget, QLabel, QLineEdit, QPushButton, QFormLayout, QMessageBox, QCheckBox", "import isfile from PyQt5.QtWidgets import QWidget, QLabel, QLineEdit, QPushButton, QFormLayout, QMessageBox, QCheckBox class", "building a small area below of the item menu when some item is", "the item menu when some item is clicked. This has been replaced for", "def _create_widgets(self): self.plot_button = QPushButton('Plot') self.wireframe = QCheckBox('Wireframe') self.deformation = QCheckBox('Deformation') self.animate =", "item is clicked. 
This has been replaced for QDialog windows and currently isn't", "self.plot_button = QPushButton('Plot') self.wireframe = QCheckBox('Wireframe') self.deformation = QCheckBox('Deformation') self.animate = QCheckBox('Animate') self.plot_button.clicked.connect(self._plot_function)", "item menu when some item is clicked. This has been replaced for QDialog", "of the item menu when some item is clicked. This has been replaced", "QFormLayout, QMessageBox, QCheckBox class PlotWidget(QWidget): \"\"\"MenuInfo Widget This class is responsible for building", "super().__init__() self.main_window = main_window self._create_widgets() self._add_widget_to_layout() def _create_widgets(self): self.plot_button = QPushButton('Plot') self.wireframe =", "responsible for building a small area below of the item menu when some", "self._add_widget_to_layout() def _create_widgets(self): self.plot_button = QPushButton('Plot') self.wireframe = QCheckBox('Wireframe') self.deformation = QCheckBox('Deformation') self.animate", "and currently isn't used. \"\"\" def __init__(self, main_window): super().__init__() self.main_window = main_window self._create_widgets()", "QCheckBox('Wireframe') self.deformation = QCheckBox('Deformation') self.animate = QCheckBox('Animate') self.plot_button.clicked.connect(self._plot_function) def _add_widget_to_layout(self): self.layout = QFormLayout()", "windows and currently isn't used. \"\"\" def __init__(self, main_window): super().__init__() self.main_window = main_window", "\"\"\"MenuInfo Widget This class is responsible for building a small area below of", "This has been replaced for QDialog windows and currently isn't used. \"\"\" def", "def _add_widget_to_layout(self): self.layout = QFormLayout() self.setLayout(self.layout) self.layout.addRow(self.wireframe) self.layout.addRow(self.deformation) self.layout.addRow(self.animate) self.layout.addRow(self.plot_button) def _plot_function(self): self.main_window.draw()", "isn't used. \"\"\" def __init__(self, main_window): super().__init__() self.main_window = main_window self._create_widgets() self._add_widget_to_layout() def", "been replaced for QDialog windows and currently isn't used. \"\"\" def __init__(self, main_window):", "QCheckBox class PlotWidget(QWidget): \"\"\"MenuInfo Widget This class is responsible for building a small", "self.main_window = main_window self._create_widgets() self._add_widget_to_layout() def _create_widgets(self): self.plot_button = QPushButton('Plot') self.wireframe = QCheckBox('Wireframe')", "area below of the item menu when some item is clicked. 
This has", "PlotWidget(QWidget): \"\"\"MenuInfo Widget This class is responsible for building a small area below", "= main_window self._create_widgets() self._add_widget_to_layout() def _create_widgets(self): self.plot_button = QPushButton('Plot') self.wireframe = QCheckBox('Wireframe') self.deformation", "= QCheckBox('Deformation') self.animate = QCheckBox('Animate') self.plot_button.clicked.connect(self._plot_function) def _add_widget_to_layout(self): self.layout = QFormLayout() self.setLayout(self.layout) self.layout.addRow(self.wireframe)", "main_window): super().__init__() self.main_window = main_window self._create_widgets() self._add_widget_to_layout() def _create_widgets(self): self.plot_button = QPushButton('Plot') self.wireframe", "= QCheckBox('Animate') self.plot_button.clicked.connect(self._plot_function) def _add_widget_to_layout(self): self.layout = QFormLayout() self.setLayout(self.layout) self.layout.addRow(self.wireframe) self.layout.addRow(self.deformation) self.layout.addRow(self.animate) self.layout.addRow(self.plot_button)", "def __init__(self, main_window): super().__init__() self.main_window = main_window self._create_widgets() self._add_widget_to_layout() def _create_widgets(self): self.plot_button =", "QWidget, QLabel, QLineEdit, QPushButton, QFormLayout, QMessageBox, QCheckBox class PlotWidget(QWidget): \"\"\"MenuInfo Widget This class", "below of the item menu when some item is clicked. This has been", "QPushButton('Plot') self.wireframe = QCheckBox('Wireframe') self.deformation = QCheckBox('Deformation') self.animate = QCheckBox('Animate') self.plot_button.clicked.connect(self._plot_function) def _add_widget_to_layout(self):", "= QPushButton('Plot') self.wireframe = QCheckBox('Wireframe') self.deformation = QCheckBox('Deformation') self.animate = QCheckBox('Animate') self.plot_button.clicked.connect(self._plot_function) def", "This class is responsible for building a small area below of the item", "QLabel, QLineEdit, QPushButton, QFormLayout, QMessageBox, QCheckBox class PlotWidget(QWidget): \"\"\"MenuInfo Widget This class is", "self.plot_button.clicked.connect(self._plot_function) def _add_widget_to_layout(self): self.layout = QFormLayout() self.setLayout(self.layout) self.layout.addRow(self.wireframe) self.layout.addRow(self.deformation) self.layout.addRow(self.animate) self.layout.addRow(self.plot_button) def _plot_function(self):", "QPushButton, QFormLayout, QMessageBox, QCheckBox class PlotWidget(QWidget): \"\"\"MenuInfo Widget This class is responsible for", "__init__(self, main_window): super().__init__() self.main_window = main_window self._create_widgets() self._add_widget_to_layout() def _create_widgets(self): self.plot_button = QPushButton('Plot')", "menu when some item is clicked. This has been replaced for QDialog windows", "Widget This class is responsible for building a small area below of the", "is clicked. This has been replaced for QDialog windows and currently isn't used.", "QMessageBox, QCheckBox class PlotWidget(QWidget): \"\"\"MenuInfo Widget This class is responsible for building a", "used. 
\"\"\" def __init__(self, main_window): super().__init__() self.main_window = main_window self._create_widgets() self._add_widget_to_layout() def _create_widgets(self):", "self._create_widgets() self._add_widget_to_layout() def _create_widgets(self): self.plot_button = QPushButton('Plot') self.wireframe = QCheckBox('Wireframe') self.deformation = QCheckBox('Deformation')", "main_window self._create_widgets() self._add_widget_to_layout() def _create_widgets(self): self.plot_button = QPushButton('Plot') self.wireframe = QCheckBox('Wireframe') self.deformation =", "when some item is clicked. This has been replaced for QDialog windows and", "self.deformation = QCheckBox('Deformation') self.animate = QCheckBox('Animate') self.plot_button.clicked.connect(self._plot_function) def _add_widget_to_layout(self): self.layout = QFormLayout() self.setLayout(self.layout)", "self.animate = QCheckBox('Animate') self.plot_button.clicked.connect(self._plot_function) def _add_widget_to_layout(self): self.layout = QFormLayout() self.setLayout(self.layout) self.layout.addRow(self.wireframe) self.layout.addRow(self.deformation) self.layout.addRow(self.animate)", "clicked. This has been replaced for QDialog windows and currently isn't used. \"\"\"", "from os.path import isfile from PyQt5.QtWidgets import QWidget, QLabel, QLineEdit, QPushButton, QFormLayout, QMessageBox,", "a small area below of the item menu when some item is clicked.", "import QWidget, QLabel, QLineEdit, QPushButton, QFormLayout, QMessageBox, QCheckBox class PlotWidget(QWidget): \"\"\"MenuInfo Widget This", "isfile from PyQt5.QtWidgets import QWidget, QLabel, QLineEdit, QPushButton, QFormLayout, QMessageBox, QCheckBox class PlotWidget(QWidget):", "PyQt5.QtWidgets import QWidget, QLabel, QLineEdit, QPushButton, QFormLayout, QMessageBox, QCheckBox class PlotWidget(QWidget): \"\"\"MenuInfo Widget", "_create_widgets(self): self.plot_button = QPushButton('Plot') self.wireframe = QCheckBox('Wireframe') self.deformation = QCheckBox('Deformation') self.animate = QCheckBox('Animate')" ]
[ "for the PyEcore ' '\\'global_registry\\''), ] boolean_options = ['auto-register-package'] def initialize_options(self): \"\"\"Set default", "file and return the root package. :param ecore_model_path: path to Ecore XMI file", "Ecore XMI files \"\"\" pattern = '*.{}'.format(self._ECORE_FILE_EXT) logger.debug('searching for Ecore XMI files in", "This is always called as late as possible, ie. after any option assignments", "level=loglevel_map[self.distribution.verbose] ) def _find_ecore_xmi_files(self, base_path=pathlib.Path('.')): \"\"\"Search for all Ecore XMI files starting from", "for all the options that this command supports. This is always called as", "('auto-register-package', None, 'Generate package auto-registration for the PyEcore ' '\\'global_registry\\''), ] boolean_options =", "logging.INFO, 2: logging.DEBUG }) logging.basicConfig( format='%(asctime)s %(levelname)s [%(name)s] %(message)s', level=loglevel_map[self.distribution.verbose] ) def _find_ecore_xmi_files(self,", "relative output path to dictionary output_path = pathlib.Path(output).relative_to('.') if model == 'default': self.output.default_factory", "'default': self.output.default_factory = lambda: output_path else: self.output[model] = output_path else: logger.warn('Ignoring invalid output", "modules with user-provided mixins to import from ' 'generated classes'), ('auto-register-package', None, 'Generate", "invalid output specifier {!r}.', token) # parse user-modules option tokens = shlex.split(self.user_modules, comments=True)", "model from a Ecore XMI file and return the root package. :param ecore_model_path:", "contextlib import distutils.log as logger import logging import pathlib import shlex import pyecore.resources", "possible, ie. after any option assignments from the command-line or from other commands", "{} for token in tokens: model, user_module = token.split('=', 1) # check if", "tokens = shlex.split(self.output, comments=True) self.output = collections.defaultdict(lambda: None) for token in tokens: model,", "classes from Ecore models. This process is controlled by the user options passed", "token) def _configure_logging(self): \"\"\"Configure logging using global verbosity level of distutils.\"\"\" loglevel_map =", "from the base directory and generates a Python package for each found Ecore", "for token in tokens: model, user_module = token.split('=', 1) # check if model", "dictionary output_path = pathlib.Path(output).relative_to('.') if model == 'default': self.output.default_factory = lambda: output_path else:", "file :return: root package of the Ecore model \"\"\" rset = pyecore.resources.ResourceSet() try:", "and output: # add relative output path to dictionary output_path = pathlib.Path(output).relative_to('.') if", "File extension of Ecore XMI file :cvar description: Description of ecore command :cvar", "which are binary \"\"\" _ECORE_FILE_EXT = 'ecore' description = 'generate Python code from", "of the setuptools command 'pyecore'.\"\"\" import collections import contextlib import distutils.log as logger", "logging.DEBUG }) logging.basicConfig( format='%(asctime)s %(levelname)s [%(name)s] %(message)s', level=loglevel_map[self.distribution.verbose] ) def _find_ecore_xmi_files(self, base_path=pathlib.Path('.')): \"\"\"Search", "code generator for Ecore models. 
It searches for Ecore models starting from the", "and user module are specified if model and user_module: self.user_modules[model] = user_module else:", "for all Ecore XMI files starting from base directory and returns a list", "= True if resource.name in self.user_modules: kwargs['user_module'] = self.user_modules[resource.name] if self.output[resource.name]: output_dir =", "directory and generates a Python package for each found Ecore model. :cvar _ECORE_FILE_EXT:", "_load_ecore_model(ecore_model_path): \"\"\"Load a single Ecore model from a Ecore XMI file and return", "Ecore models. This process is controlled by the user options passed on the", "boolean_options = ['auto-register-package'] def initialize_options(self): \"\"\"Set default values for all the options that", "from base directory and returns a list of them. :param base_path: base path", "as logger import logging import pathlib import shlex import pyecore.resources import pyecoregen.ecore import", "self.auto_register_package = 0 def finalize_options(self): \"\"\"Set final values for all the options that", "that these defaults may be overridden by other commands, by the setup script,", "1) # check if model and user module are specified if model and", "# add relative output path to dictionary output_path = pathlib.Path(output).relative_to('.') if model ==", "Note that these defaults may be overridden by other commands, by the setup", "= 'generate Python code from Ecore models' user_options = [ ('ecore-models=', 'e', 'specify", "logging.WARNING, 1: logging.INFO, 2: logging.DEBUG }) logging.basicConfig( format='%(asctime)s %(levelname)s [%(name)s] %(message)s', level=loglevel_map[self.distribution.verbose] )", "output = token.split('=', 1) # check if model and output are specified if", "def run(self): \"\"\"Perform all tasks necessary to generate Python packages representing the classes", "output path to dictionary output_path = pathlib.Path(output).relative_to('.') if model == 'default': self.output.default_factory =", "of ecore command :cvar user_options: Options which can be passed by the user", "default values. \"\"\" self._configure_logging() # find Ecore XMI files ecore_xmi_files = self._find_ecore_xmi_files() #", "files in \\'{!s}\\''.format(str(base_path))) return sorted(base_path.rglob(pattern)) @staticmethod @contextlib.contextmanager def _load_ecore_model(ecore_model_path): \"\"\"Load a single Ecore", "for ecore_xmi_file in ecore_xmi_files: with self._load_ecore_model(ecore_xmi_file) as resource: if self.ecore_models is None or", "PyEcoreCommand(setuptools.Command): \"\"\"A setuptools command for generating Python code from Ecore models. An extra", "by the setup script, by config files, or by the command-line. \"\"\" self.ecore_models", "= [ ('ecore-models=', 'e', 'specify Ecore models to generate code for'), ('output=', 'o',", "have been done. \"\"\" # parse ecore-models option if self.ecore_models: self.ecore_models = shlex.split(self.ecore_models,", "\"\"\"A setuptools command for generating Python code from Ecore models. An extra command", "wraps pyecoregen - the real Python code generator for Ecore models. 
It searches", "@staticmethod @contextlib.contextmanager def _load_ecore_model(ecore_model_path): \"\"\"Load a single Ecore model from a Ecore XMI", "for Ecore XMI files in \\'{!s}\\''.format(str(base_path))) return sorted(base_path.rglob(pattern)) @staticmethod @contextlib.contextmanager def _load_ecore_model(ecore_model_path): \"\"\"Load", "import pathlib import shlex import pyecore.resources import pyecoregen.ecore import setuptools class PyEcoreCommand(setuptools.Command): \"\"\"A", "\"\"\"Search for all Ecore XMI files starting from base directory and returns a", "which can be passed by the user :cvar boolean_options: Subset of user options", "mixins to import from ' 'generated classes'), ('auto-register-package', None, 'Generate package auto-registration for", "= ecore_xmi_file.parent # generate Python classes logger.info( 'running pyecoregen to generate code for", "import from ' 'generated classes'), ('auto-register-package', None, 'Generate package auto-registration for the PyEcore", "user :cvar boolean_options: Subset of user options which are binary \"\"\" _ECORE_FILE_EXT =", "ecore-models option if self.ecore_models: self.ecore_models = shlex.split(self.ecore_models, comments=True) # parse output option tokens", "to Ecore XMI file :return: root package of the Ecore model \"\"\" rset", "True if resource.name in self.user_modules: kwargs['user_module'] = self.user_modules[resource.name] if self.output[resource.name]: output_dir = self.output[resource.name]", "= collections.defaultdict(lambda: None) for token in tokens: model, output = token.split('=', 1) #", "are specified if model and output: # add relative output path to dictionary", "for setuptools to generate static Python classes from Ecore models. The pyecore command", "the options that this command supports. This is always called as late as", "Ecore model for ecore_xmi_file in ecore_xmi_files: with self._load_ecore_model(ecore_xmi_file) as resource: if self.ecore_models is", "self.ecore_models = None self.output = '' self.user_modules = '' self.auto_register_package = 0 def", "packages representing the classes from Ecore models. This process is controlled by the", "to generate static Python classes from Ecore models. The pyecore command wraps pyecoregen", "in tokens: model, output = token.split('=', 1) # check if model and output", "add relative output path to dictionary output_path = pathlib.Path(output).relative_to('.') if model == 'default':", "= output_path else: logger.warn('Ignoring invalid output specifier {!r}.', token) # parse user-modules option", "assignments from the command-line or from other commands have been done. \"\"\" #", "An extra command for setuptools to generate static Python classes from Ecore models.", "model and output are specified if model and output: # add relative output", "if self.auto_register_package: kwargs['auto_register_package'] = True if resource.name in self.user_modules: kwargs['user_module'] = self.user_modules[resource.name] if", "configure EcoreGenerator kwargs = {} if self.auto_register_package: kwargs['auto_register_package'] = True if resource.name in", "command wraps pyecoregen - the real Python code generator for Ecore models. It", "commands, by the setup script, by config files, or by the command-line. \"\"\"", "and generates a Python package for each found Ecore model. :cvar _ECORE_FILE_EXT: File", "pyecore.resources import pyecoregen.ecore import setuptools class PyEcoreCommand(setuptools.Command): \"\"\"A setuptools command for generating Python", "commands have been done. 
\"\"\" # parse ecore-models option if self.ecore_models: self.ecore_models =", "base path to search for Ecore XMI files :return: a list of all", "format='%(asctime)s %(levelname)s [%(name)s] %(message)s', level=loglevel_map[self.distribution.verbose] ) def _find_ecore_xmi_files(self, base_path=pathlib.Path('.')): \"\"\"Search for all Ecore", "final values for all the options that this command supports. This is always", "import shlex import pyecore.resources import pyecoregen.ecore import setuptools class PyEcoreCommand(setuptools.Command): \"\"\"A setuptools command", "Ecore models starting from the base directory and generates a Python package for", "self.ecore_models: # configure EcoreGenerator kwargs = {} if self.auto_register_package: kwargs['auto_register_package'] = True if", "'generate Python code from Ecore models' user_options = [ ('ecore-models=', 'e', 'specify Ecore", "_find_ecore_xmi_files(self, base_path=pathlib.Path('.')): \"\"\"Search for all Ecore XMI files starting from base directory and", "'pyecore'.\"\"\" import collections import contextlib import distutils.log as logger import logging import pathlib", "Ecore models to generate code for'), ('output=', 'o', 'specify directories where output is", "except Exception: raise else: rset.remove_resource(resource) def run(self): \"\"\"Perform all tasks necessary to generate", "setuptools command 'pyecore'.\"\"\" import collections import contextlib import distutils.log as logger import logging", "code for'), ('output=', 'o', 'specify directories where output is generated'), ('user-modules=', None, 'dotted", "by the user :cvar boolean_options: Subset of user options which are binary \"\"\"", "self.output[model] = output_path else: logger.warn('Ignoring invalid output specifier {!r}.', token) # parse user-modules", "return the root package. :param ecore_model_path: path to Ecore XMI file :return: root", "resource.name in self.user_modules: kwargs['user_module'] = self.user_modules[resource.name] if self.output[resource.name]: output_dir = self.output[resource.name] else: output_dir", "token) # parse user-modules option tokens = shlex.split(self.user_modules, comments=True) self.user_modules = {} for", "class PyEcoreCommand(setuptools.Command): \"\"\"A setuptools command for generating Python code from Ecore models. An", "logging using global verbosity level of distutils.\"\"\" loglevel_map = collections.defaultdict(lambda: logging.WARNING) loglevel_map.update({ 0:", "%(message)s', level=loglevel_map[self.distribution.verbose] ) def _find_ecore_xmi_files(self, base_path=pathlib.Path('.')): \"\"\"Search for all Ecore XMI files starting", "returns a list of them. :param base_path: base path to search for Ecore", "the real Python code generator for Ecore models. It searches for Ecore models", "None or resource.name in self.ecore_models: # configure EcoreGenerator kwargs = {} if self.auto_register_package:", "options passed on the command line or set internally to default values. \"\"\"", "late as possible, ie. after any option assignments from the command-line or from", "the user :cvar boolean_options: Subset of user options which are binary \"\"\" _ECORE_FILE_EXT", "found Ecore model. :cvar _ECORE_FILE_EXT: File extension of Ecore XMI file :cvar description:", "model, output = token.split('=', 1) # check if model and output are specified", "output specifier {!r}.', token) # parse user-modules option tokens = shlex.split(self.user_modules, comments=True) self.user_modules", "that this command supports. 
This is always called as late as possible, ie.", "logger.info( 'running pyecoregen to generate code for {!r} metamodel'.format(resource.name) ) pyecoregen.ecore.EcoreGenerator(**kwargs).generate( resource, output_dir.as_posix()", "Ecore XMI file and return the root package. :param ecore_model_path: path to Ecore", "the options that this command supports. Note that these defaults may be overridden", "parse user-modules option tokens = shlex.split(self.user_modules, comments=True) self.user_modules = {} for token in", "logger.warn('Ignoring invalid user module specifier {!r}.', token) def _configure_logging(self): \"\"\"Configure logging using global", "internally to default values. \"\"\" self._configure_logging() # find Ecore XMI files ecore_xmi_files =", "Ecore models. The pyecore command wraps pyecoregen - the real Python code generator", "other commands, by the setup script, by config files, or by the command-line.", "PyEcore ' '\\'global_registry\\''), ] boolean_options = ['auto-register-package'] def initialize_options(self): \"\"\"Set default values for", "Description of ecore command :cvar user_options: Options which can be passed by the", "# check if model and user module are specified if model and user_module:", "'generated classes'), ('auto-register-package', None, 'Generate package auto-registration for the PyEcore ' '\\'global_registry\\''), ]", "[ ('ecore-models=', 'e', 'specify Ecore models to generate code for'), ('output=', 'o', 'specify", "or from other commands have been done. \"\"\" # parse ecore-models option if", "' '\\'global_registry\\''), ] boolean_options = ['auto-register-package'] def initialize_options(self): \"\"\"Set default values for all", "is always called as late as possible, ie. after any option assignments from", "self.user_modules: kwargs['user_module'] = self.user_modules[resource.name] if self.output[resource.name]: output_dir = self.output[resource.name] else: output_dir = ecore_xmi_file.parent", "= lambda: output_path else: self.output[model] = output_path else: logger.warn('Ignoring invalid output specifier {!r}.',", "passed by the user :cvar boolean_options: Subset of user options which are binary", "# load each Ecore model for ecore_xmi_file in ecore_xmi_files: with self._load_ecore_model(ecore_xmi_file) as resource:", "binary \"\"\" _ECORE_FILE_EXT = 'ecore' description = 'generate Python code from Ecore models'", "option if self.ecore_models: self.ecore_models = shlex.split(self.ecore_models, comments=True) # parse output option tokens =", ":return: root package of the Ecore model \"\"\" rset = pyecore.resources.ResourceSet() try: logger.debug('loading", "to generate code for {!r} metamodel'.format(resource.name) ) pyecoregen.ecore.EcoreGenerator(**kwargs).generate( resource, output_dir.as_posix() ) else: logger.debug('skipping", "This process is controlled by the user options passed on the command line", "pathlib.Path(output).relative_to('.') if model == 'default': self.output.default_factory = lambda: output_path else: self.output[model] = output_path", "tokens: model, user_module = token.split('=', 1) # check if model and user module", "self.output[resource.name] else: output_dir = ecore_xmi_file.parent # generate Python classes logger.info( 'running pyecoregen to", "specified if model and output: # add relative output path to dictionary output_path", "Options which can be passed by the user :cvar boolean_options: Subset of user", "('ecore-models=', 'e', 'specify Ecore models to generate code for'), ('output=', 'o', 'specify directories", 
"found Ecore XMI files \"\"\" pattern = '*.{}'.format(self._ECORE_FILE_EXT) logger.debug('searching for Ecore XMI files", "Python package for each found Ecore model. :cvar _ECORE_FILE_EXT: File extension of Ecore", "command line or set internally to default values. \"\"\" self._configure_logging() # find Ecore", "ecore_xmi_files = self._find_ecore_xmi_files() # load each Ecore model for ecore_xmi_file in ecore_xmi_files: with", "= shlex.split(self.output, comments=True) self.output = collections.defaultdict(lambda: None) for token in tokens: model, output", "# find Ecore XMI files ecore_xmi_files = self._find_ecore_xmi_files() # load each Ecore model", "= token.split('=', 1) # check if model and user module are specified if", "output option tokens = shlex.split(self.output, comments=True) self.output = collections.defaultdict(lambda: None) for token in", "default values for all the options that this command supports. Note that these", "specified if model and user_module: self.user_modules[model] = user_module else: logger.warn('Ignoring invalid user module", "defaults may be overridden by other commands, by the setup script, by config", "It searches for Ecore models starting from the base directory and generates a", "finalize_options(self): \"\"\"Set final values for all the options that this command supports. This", "specifier {!r}.', token) def _configure_logging(self): \"\"\"Configure logging using global verbosity level of distutils.\"\"\"", "module are specified if model and user_module: self.user_modules[model] = user_module else: logger.warn('Ignoring invalid", ":param base_path: base path to search for Ecore XMI files :return: a list", "tokens: model, output = token.split('=', 1) # check if model and output are", "Ecore XMI file :cvar description: Description of ecore command :cvar user_options: Options which", "package. :param ecore_model_path: path to Ecore XMI file :return: root package of the", "in self.ecore_models: # configure EcoreGenerator kwargs = {} if self.auto_register_package: kwargs['auto_register_package'] = True", "the classes from Ecore models. This process is controlled by the user options", "options which are binary \"\"\" _ECORE_FILE_EXT = 'ecore' description = 'generate Python code", "XMI file :return: root package of the Ecore model \"\"\" rset = pyecore.resources.ResourceSet()", "pyecoregen.ecore import setuptools class PyEcoreCommand(setuptools.Command): \"\"\"A setuptools command for generating Python code from", "command supports. This is always called as late as possible, ie. after any", ":return: a list of all found Ecore XMI files \"\"\" pattern = '*.{}'.format(self._ECORE_FILE_EXT)", "directory and returns a list of them. 
:param base_path: base path to search", "rset.remove_resource(resource) def run(self): \"\"\"Perform all tasks necessary to generate Python packages representing the", "self._find_ecore_xmi_files() # load each Ecore model for ecore_xmi_file in ecore_xmi_files: with self._load_ecore_model(ecore_xmi_file) as", "shlex.split(self.ecore_models, comments=True) # parse output option tokens = shlex.split(self.output, comments=True) self.output = collections.defaultdict(lambda:", "import pyecoregen.ecore import setuptools class PyEcoreCommand(setuptools.Command): \"\"\"A setuptools command for generating Python code", "None) for token in tokens: model, output = token.split('=', 1) # check if", "load each Ecore model for ecore_xmi_file in ecore_xmi_files: with self._load_ecore_model(ecore_xmi_file) as resource: if", "token.split('=', 1) # check if model and output are specified if model and", "\"\"\" self.ecore_models = None self.output = '' self.user_modules = '' self.auto_register_package = 0", "by other commands, by the setup script, by config files, or by the", "file :cvar description: Description of ecore command :cvar user_options: Options which can be", "models to generate code for'), ('output=', 'o', 'specify directories where output is generated'),", "{!r}.', token) def _configure_logging(self): \"\"\"Configure logging using global verbosity level of distutils.\"\"\" loglevel_map", "option assignments from the command-line or from other commands have been done. \"\"\"", "def finalize_options(self): \"\"\"Set final values for all the options that this command supports.", "XMI files starting from base directory and returns a list of them. :param", "to search for Ecore XMI files :return: a list of all found Ecore", "ecore_xmi_file.parent # generate Python classes logger.info( 'running pyecoregen to generate code for {!r}", "_configure_logging(self): \"\"\"Configure logging using global verbosity level of distutils.\"\"\" loglevel_map = collections.defaultdict(lambda: logging.WARNING)", "a list of all found Ecore XMI files \"\"\" pattern = '*.{}'.format(self._ECORE_FILE_EXT) logger.debug('searching", "shlex.split(self.output, comments=True) self.output = collections.defaultdict(lambda: None) for token in tokens: model, output =", "The pyecore command wraps pyecoregen - the real Python code generator for Ecore", "or resource.name in self.ecore_models: # configure EcoreGenerator kwargs = {} if self.auto_register_package: kwargs['auto_register_package']", "the command-line or from other commands have been done. \"\"\" # parse ecore-models", "comments=True) # parse output option tokens = shlex.split(self.output, comments=True) self.output = collections.defaultdict(lambda: None)", "generate static Python classes from Ecore models. The pyecore command wraps pyecoregen -", "code from Ecore models' user_options = [ ('ecore-models=', 'e', 'specify Ecore models to", "distutils.log as logger import logging import pathlib import shlex import pyecore.resources import pyecoregen.ecore", "resource.name in self.ecore_models: # configure EcoreGenerator kwargs = {} if self.auto_register_package: kwargs['auto_register_package'] =", "logging import pathlib import shlex import pyecore.resources import pyecoregen.ecore import setuptools class PyEcoreCommand(setuptools.Command):", "values for all the options that this command supports. 
Note that these defaults", "# configure EcoreGenerator kwargs = {} if self.auto_register_package: kwargs['auto_register_package'] = True if resource.name", "the base directory and generates a Python package for each found Ecore model.", "description: Description of ecore command :cvar user_options: Options which can be passed by", "model. :cvar _ECORE_FILE_EXT: File extension of Ecore XMI file :cvar description: Description of", "generates a Python package for each found Ecore model. :cvar _ECORE_FILE_EXT: File extension", "to default values. \"\"\" self._configure_logging() # find Ecore XMI files ecore_xmi_files = self._find_ecore_xmi_files()", "{} if self.auto_register_package: kwargs['auto_register_package'] = True if resource.name in self.user_modules: kwargs['user_module'] = self.user_modules[resource.name]", "== 'default': self.output.default_factory = lambda: output_path else: self.output[model] = output_path else: logger.warn('Ignoring invalid", "def _configure_logging(self): \"\"\"Configure logging using global verbosity level of distutils.\"\"\" loglevel_map = collections.defaultdict(lambda:", "= {} if self.auto_register_package: kwargs['auto_register_package'] = True if resource.name in self.user_modules: kwargs['user_module'] =", "all found Ecore XMI files \"\"\" pattern = '*.{}'.format(self._ECORE_FILE_EXT) logger.debug('searching for Ecore XMI", "user module specifier {!r}.', token) def _configure_logging(self): \"\"\"Configure logging using global verbosity level", "classes'), ('auto-register-package', None, 'Generate package auto-registration for the PyEcore ' '\\'global_registry\\''), ] boolean_options", "'Generate package auto-registration for the PyEcore ' '\\'global_registry\\''), ] boolean_options = ['auto-register-package'] def", "_ECORE_FILE_EXT = 'ecore' description = 'generate Python code from Ecore models' user_options =", "}) logging.basicConfig( format='%(asctime)s %(levelname)s [%(name)s] %(message)s', level=loglevel_map[self.distribution.verbose] ) def _find_ecore_xmi_files(self, base_path=pathlib.Path('.')): \"\"\"Search for", "else: logger.warn('Ignoring invalid output specifier {!r}.', token) # parse user-modules option tokens =", "classes logger.info( 'running pyecoregen to generate code for {!r} metamodel'.format(resource.name) ) pyecoregen.ecore.EcoreGenerator(**kwargs).generate( resource,", "for Ecore XMI files :return: a list of all found Ecore XMI files", "model \"\"\" rset = pyecore.resources.ResourceSet() try: logger.debug('loading \\'{!s}\\''.format(str(ecore_model_path))) resource = rset.get_resource(ecore_model_path.as_posix()) yield resource.contents[0]", "generate Python packages representing the classes from Ecore models. This process is controlled", "models. An extra command for setuptools to generate static Python classes from Ecore", "- the real Python code generator for Ecore models. It searches for Ecore", "\"\"\"Set default values for all the options that this command supports. Note that", "from a Ecore XMI file and return the root package. 
:param ecore_model_path: path", "self.output = '' self.user_modules = '' self.auto_register_package = 0 def finalize_options(self): \"\"\"Set final", "are specified if model and user_module: self.user_modules[model] = user_module else: logger.warn('Ignoring invalid user", "collections.defaultdict(lambda: None) for token in tokens: model, output = token.split('=', 1) # check", "{!r}.', token) # parse user-modules option tokens = shlex.split(self.user_modules, comments=True) self.user_modules = {}", "supports. This is always called as late as possible, ie. after any option", "self._configure_logging() # find Ecore XMI files ecore_xmi_files = self._find_ecore_xmi_files() # load each Ecore", "to dictionary output_path = pathlib.Path(output).relative_to('.') if model == 'default': self.output.default_factory = lambda: output_path", "by config files, or by the command-line. \"\"\" self.ecore_models = None self.output =", "script, by config files, or by the command-line. \"\"\" self.ecore_models = None self.output", "else: self.output[model] = output_path else: logger.warn('Ignoring invalid output specifier {!r}.', token) # parse", "Ecore models' user_options = [ ('ecore-models=', 'e', 'specify Ecore models to generate code", "if resource.name in self.user_modules: kwargs['user_module'] = self.user_modules[resource.name] if self.output[resource.name]: output_dir = self.output[resource.name] else:", "= shlex.split(self.user_modules, comments=True) self.user_modules = {} for token in tokens: model, user_module =", "self.output[resource.name]: output_dir = self.output[resource.name] else: output_dir = ecore_xmi_file.parent # generate Python classes logger.info(", "setuptools command for generating Python code from Ecore models. An extra command for", "as resource: if self.ecore_models is None or resource.name in self.ecore_models: # configure EcoreGenerator", "option tokens = shlex.split(self.user_modules, comments=True) self.user_modules = {} for token in tokens: model,", "= ['auto-register-package'] def initialize_options(self): \"\"\"Set default values for all the options that this", "generating Python code from Ecore models. An extra command for setuptools to generate", "verbosity level of distutils.\"\"\" loglevel_map = collections.defaultdict(lambda: logging.WARNING) loglevel_map.update({ 0: logging.WARNING, 1: logging.INFO,", "Ecore XMI files in \\'{!s}\\''.format(str(base_path))) return sorted(base_path.rglob(pattern)) @staticmethod @contextlib.contextmanager def _load_ecore_model(ecore_model_path): \"\"\"Load a", "ecore_model_path: path to Ecore XMI file :return: root package of the Ecore model", "these defaults may be overridden by other commands, by the setup script, by", "Ecore model. :cvar _ECORE_FILE_EXT: File extension of Ecore XMI file :cvar description: Description", "resource: if self.ecore_models is None or resource.name in self.ecore_models: # configure EcoreGenerator kwargs", "for generating Python code from Ecore models. An extra command for setuptools to", "%(levelname)s [%(name)s] %(message)s', level=loglevel_map[self.distribution.verbose] ) def _find_ecore_xmi_files(self, base_path=pathlib.Path('.')): \"\"\"Search for all Ecore XMI", "user_options = [ ('ecore-models=', 'e', 'specify Ecore models to generate code for'), ('output=',", "package for each found Ecore model. :cvar _ECORE_FILE_EXT: File extension of Ecore XMI", "files, or by the command-line. 
\"\"\" self.ecore_models = None self.output = '' self.user_modules", "if model and output are specified if model and output: # add relative", "'specify Ecore models to generate code for'), ('output=', 'o', 'specify directories where output", "= pyecore.resources.ResourceSet() try: logger.debug('loading \\'{!s}\\''.format(str(ecore_model_path))) resource = rset.get_resource(ecore_model_path.as_posix()) yield resource.contents[0] except Exception: raise", "them. :param base_path: base path to search for Ecore XMI files :return: a", "config files, or by the command-line. \"\"\" self.ecore_models = None self.output = ''", "try: logger.debug('loading \\'{!s}\\''.format(str(ecore_model_path))) resource = rset.get_resource(ecore_model_path.as_posix()) yield resource.contents[0] except Exception: raise else: rset.remove_resource(resource)", "base_path: base path to search for Ecore XMI files :return: a list of", "model == 'default': self.output.default_factory = lambda: output_path else: self.output[model] = output_path else: logger.warn('Ignoring", "and output are specified if model and output: # add relative output path", "values. \"\"\" self._configure_logging() # find Ecore XMI files ecore_xmi_files = self._find_ecore_xmi_files() # load", "all the options that this command supports. Note that these defaults may be", "user_module = token.split('=', 1) # check if model and user module are specified", "list of all found Ecore XMI files \"\"\" pattern = '*.{}'.format(self._ECORE_FILE_EXT) logger.debug('searching for", "if self.output[resource.name]: output_dir = self.output[resource.name] else: output_dir = ecore_xmi_file.parent # generate Python classes", "description = 'generate Python code from Ecore models' user_options = [ ('ecore-models=', 'e',", "kwargs['user_module'] = self.user_modules[resource.name] if self.output[resource.name]: output_dir = self.output[resource.name] else: output_dir = ecore_xmi_file.parent #", "auto-registration for the PyEcore ' '\\'global_registry\\''), ] boolean_options = ['auto-register-package'] def initialize_options(self): \"\"\"Set", "output_dir = self.output[resource.name] else: output_dir = ecore_xmi_file.parent # generate Python classes logger.info( 'running", "Ecore XMI files starting from base directory and returns a list of them.", "options that this command supports. This is always called as late as possible,", "Ecore XMI files :return: a list of all found Ecore XMI files \"\"\"", "Python code from Ecore models. An extra command for setuptools to generate static", "generate Python classes logger.info( 'running pyecoregen to generate code for {!r} metamodel'.format(resource.name) )", "output_path = pathlib.Path(output).relative_to('.') if model == 'default': self.output.default_factory = lambda: output_path else: self.output[model]", "to generate code for'), ('output=', 'o', 'specify directories where output is generated'), ('user-modules=',", "option tokens = shlex.split(self.output, comments=True) self.output = collections.defaultdict(lambda: None) for token in tokens:", "a Ecore XMI file and return the root package. 
"""Implementation of the setuptools command 'pyecore'."""
import collections
import contextlib
import distutils.log as logger
import logging
import pathlib
import shlex

import pyecore.resources
import pyecoregen.ecore
import setuptools


class PyEcoreCommand(setuptools.Command):
    """A setuptools command for generating Python code from Ecore models.

    An extra command for setuptools to generate static Python classes from
    Ecore models. The pyecore command wraps pyecoregen - the real Python code
    generator for Ecore models. It searches for Ecore models starting from the
    base directory and generates a Python package for each found Ecore model.

    :cvar _ECORE_FILE_EXT: File extension of Ecore XMI file
    :cvar description: Description of ecore command
    :cvar user_options: Options which can be passed by the user
    :cvar boolean_options: Subset of user options which are binary
    """
    _ECORE_FILE_EXT = 'ecore'

    description = 'generate Python code from Ecore models'

    user_options = [
        ('ecore-models=', 'e', 'specify Ecore models to generate code for'),
        ('output=', 'o', 'specify directories where output is generated'),
        ('user-modules=', None,
         'dotted names of modules with user-provided mixins to import from '
         'generated classes'),
        ('auto-register-package', None,
         'Generate package auto-registration for the PyEcore '
         '\'global_registry\''),
    ]

    boolean_options = ['auto-register-package']

    def initialize_options(self):
        """Set default values for all the options that this command supports.

        Note that these defaults may be overridden by other commands, by the
        setup script, by config files, or by the command-line.
        """
        self.ecore_models = None
        self.output = ''
        self.user_modules = ''
        self.auto_register_package = 0

    def finalize_options(self):
        """Set final values for all the options that this command supports.

        This is always called as late as possible, i.e. after any option
        assignments from the command-line or from other commands have been
        done.
        """
        # parse ecore-models option
        if self.ecore_models:
            self.ecore_models = shlex.split(self.ecore_models, comments=True)

        # parse output option
        tokens = shlex.split(self.output, comments=True)
        self.output = collections.defaultdict(lambda: None)
        for token in tokens:
            model, output = token.split('=', 1)
            # check if model and output are specified
            if model and output:
                # add relative output path to dictionary
                output_path = pathlib.Path(output).relative_to('.')
                if model == 'default':
                    self.output.default_factory = lambda: output_path
                else:
                    self.output[model] = output_path
            else:
                logger.warn('Ignoring invalid output specifier {!r}.'.format(token))

        # parse user-modules option
        tokens = shlex.split(self.user_modules, comments=True)
        self.user_modules = {}
        for token in tokens:
            model, user_module = token.split('=', 1)
            # check if model and user module are specified
            if model and user_module:
                self.user_modules[model] = user_module
            else:
                logger.warn('Ignoring invalid user module specifier {!r}.'.format(token))

    def _configure_logging(self):
        """Configure logging using global verbosity level of distutils."""
        loglevel_map = collections.defaultdict(lambda: logging.WARNING)
        loglevel_map.update({
            0: logging.WARNING,
            1: logging.INFO,
            2: logging.DEBUG
        })
        logging.basicConfig(
            format='%(asctime)s %(levelname)s [%(name)s] %(message)s',
            level=loglevel_map[self.distribution.verbose]
        )

    def _find_ecore_xmi_files(self, base_path=pathlib.Path('.')):
        """Search for all Ecore XMI files starting from the base directory and
        return a list of them.

        :param base_path: base path to search for Ecore XMI files
        :return: a list of all found Ecore XMI files
        """
        pattern = '*.{}'.format(self._ECORE_FILE_EXT)
        logger.debug('searching for Ecore XMI files in \'{!s}\''.format(str(base_path)))
        return sorted(base_path.rglob(pattern))

    @staticmethod
    @contextlib.contextmanager
    def _load_ecore_model(ecore_model_path):
        """Load a single Ecore model from an Ecore XMI file and return the
        root package.

        :param ecore_model_path: path to Ecore XMI file
        :return: root package of the Ecore model
        """
        rset = pyecore.resources.ResourceSet()
        try:
            logger.debug('loading \'{!s}\''.format(str(ecore_model_path)))
            resource = rset.get_resource(ecore_model_path.as_posix())
            yield resource.contents[0]
        except Exception:
            raise
        else:
            rset.remove_resource(resource)

    def run(self):
        """Perform all tasks necessary to generate Python packages representing
        the classes from Ecore models.

        This process is controlled by the user options passed on the command
        line or set internally to default values.
        """
        self._configure_logging()

        # find Ecore XMI files
        ecore_xmi_files = self._find_ecore_xmi_files()

        # load each Ecore model
        for ecore_xmi_file in ecore_xmi_files:
            with self._load_ecore_model(ecore_xmi_file) as resource:
                if self.ecore_models is None or resource.name in self.ecore_models:
                    # configure EcoreGenerator
                    kwargs = {}
                    if self.auto_register_package:
                        kwargs['auto_register_package'] = True
                    if resource.name in self.user_modules:
                        kwargs['user_module'] = self.user_modules[resource.name]
                    if self.output[resource.name]:
                        output_dir = self.output[resource.name]
                    else:
                        output_dir = ecore_xmi_file.parent

                    # generate Python classes
                    logger.info(
                        'running pyecoregen to generate code for '
                        '{!r} metamodel'.format(resource.name)
                    )
                    pyecoregen.ecore.EcoreGenerator(**kwargs).generate(
                        resource, output_dir.as_posix()
                    )
                else:
                    logger.debug('skipping {!r} metamodel'.format(resource.name))
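The command above is only exposed once a project registers it through setuptools' standard cmdclass hook. Below is a minimal, hedged sketch of such a registration and one possible invocation; the module path setup_pyecore, the project name, and the model/output values are illustrative assumptions rather than anything taken from the source.

# Hedged usage sketch: wiring PyEcoreCommand into a project's setup.py via the
# standard setuptools 'cmdclass' mechanism. The import path and all names
# below are assumptions for illustration only.
import setuptools

from setup_pyecore import PyEcoreCommand  # hypothetical module holding the class above

setuptools.setup(
    name='example-project',                 # hypothetical project name
    packages=setuptools.find_packages(),
    cmdclass={'pyecore': PyEcoreCommand},   # exposes 'python setup.py pyecore'
)

# Possible invocation (each option value is a whitespace-separated list of
# 'model=value' tokens, split by finalize_options(); the 'default' key sets
# the fallback output directory):
#   python setup.py pyecore \
#       --output "default=generated mylib=mylib/generated" \
#       --user-modules "mylib=mylib.mixins" \
#       --auto-register-package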
[ "is passed if callable. \"\"\" self.val = val def __call__(self, parsed_args_namespace): if callable(self.val):", "argparse import ArgumentParser, RawTextHelpFormatter, _SubParsersAction, SUPPRESS from collections import namedtuple, OrderedDict def is_subparser(action_group):", "False def error(self, message): \"\"\"Overrides error to control printing output\"\"\" if self._debug: import", "= val return val def __contains__(self, key): return key in self._fields def __getitem__(self,", "Lazy(object): \"\"\"Lazily load a default argument after the args have been parsed\"\"\" def", "groups, except SubParsers for action_group in self._action_groups: if is_subparser(action_group): continue formatter.start_section(action_group.title) formatter.add_text(action_group.description) formatter.add_arguments(action_group._group_actions)", "not attribute %r' % (type(self), key)) val = self._keywords.get(key) if val is not", "'value') + '\\n' header += hfmt.format('---', arg_len, '-----') print(title) print('=' * len(title)) print(header)", "callable(self.val): return self.val(parsed_args_namespace) return self.val class ParsedArgsNamespace(object): def __init__(self, keywords, defaults): self._keywords =", "hfmt.format('arg', arg_len, 'value') + '\\n' header += hfmt.format('---', arg_len, '-----') print(title) print('=' *", "return val val = self._defaults.get(key) if val is not None: if callable(val): val", "' ' * 4 names = self.names() arg_len = max(3, max(map(len, names))) hfmt", "for action_group in self._action_groups: if is_subparser(action_group): continue formatter.start_section(action_group.title) formatter.add_text(action_group.description) formatter.add_arguments(action_group._group_actions) formatter.end_section() # epilog", "a lazy object. Args: val -- if object is callable, it should take", "been parsed\"\"\" def __init__(self, val): \"\"\"Initialize a lazy object. Args: val -- if", "logging from argparse import ArgumentParser, RawTextHelpFormatter, _SubParsersAction, SUPPRESS from collections import namedtuple, OrderedDict", "after the args have been parsed\"\"\" def __init__(self, val): \"\"\"Initialize a lazy object.", "self._fields = set(list(self._keywords.keys()) + list(self._defaults.keys())) def __getattr__(self, key): if key not in self._fields:", "raise AttributeError('%r object has not attribute %r' % (type(self), key)) val = self._keywords.get(key)", "def error(self, message): \"\"\"Overrides error to control printing output\"\"\" if self._debug: import pdb", "not print subparsers\"\"\" formatter = self._get_formatter() # usage formatter.add_usage(self.usage, self._actions, self._mutually_exclusive_groups) # description", "pdb.set_trace() self.print_usage(sys.stderr) self.exit(2, ('\\nERROR: {}\\n').format(message)) def format_help(self): \"\"\"Overrides format_help to not print subparsers\"\"\"", "parser = super(RootParser, self) ns = parser.parse_args(args, namespace) parsed_args = ParsedArgs(OrderedDict(ns._get_kwargs()), defaults) if", "object is passed if callable. 
\"\"\" self.val = val def __call__(self, parsed_args_namespace): if", "def pprint(self): spacer = ' ' * 4 names = self.names() arg_len =", "or {} self._fields = set(list(self._keywords.keys()) + list(self._defaults.keys())) def __getattr__(self, key): if key not", "in self._fields) def __dir__(self): return sorted(set(dir(type(self)) + self._fields)) def __repr__(self): return 'ParsedArgsNamespace(%r, %r)'", "in self._fields: raise AttributeError('%r object has not attribute %r' % (type(self), key)) val", "user-defined groups, except SubParsers for action_group in self._action_groups: if is_subparser(action_group): continue formatter.start_section(action_group.title) formatter.add_text(action_group.description)", "self.print_usage(sys.stderr) self.exit(2, ('\\nERROR: {}\\n').format(message)) def format_help(self): \"\"\"Overrides format_help to not print subparsers\"\"\" formatter", "def add_parser(*args, **kwargs): return RootParser(*args, **kwargs) def __init__(self, *args, **kwargs): kwargs['formatter_class'] = CustomHelpFormatter", "class Lazy(object): \"\"\"Lazily load a default argument after the args have been parsed\"\"\"", "it should take one parameter. The arguments namespace object is passed if callable.", "AttributeError('%r object has not attribute %r' % (type(self), key)) val = self._keywords.get(key) if", "def __call__(self, parsed_args_namespace): if callable(self.val): return self.val(parsed_args_namespace) return self.val class ParsedArgsNamespace(object): def __init__(self,", "__init__(self, keywords, defaults): self._keywords = keywords self._defaults = defaults or {} self._fields =", "max(3, max(map(len, names))) hfmt = '{:{}}'+spacer+'{}' lfmt = '{:{}}'+spacer+'{!r}' msg = '\\n'.join(lfmt.format(name, arg_len,", "list(self._defaults.keys())) def __getattr__(self, key): if key not in self._fields: raise AttributeError('%r object has", "epilog formatter.add_text(self.epilog) # determine help from format above return formatter.format_help() def parse_args(self, args=None,", "args=None, namespace=None, defaults=None): if not args: args = sys.argv[1:] if len(args) > 0", "def __init__(self, *args, **kwargs): super(CustomHelpFormatter, self).__init__(*args, **kwargs) self._action_max_length = 10 class Lazy(object): \"\"\"Lazily", "ParsedArgs(object): def __init__(self, keywords, defaults): self.ns = ParsedArgsNamespace(keywords, defaults) def names(self): return self.ns._fields", "= self._defaults.get(key) if val is not None: if callable(val): val = val(self) self._keywords[key]", "key in self._fields def __getitem__(self, key): return self.__getattr__(key) def __iter__(self): return iter((k, self[k])", "0 and args[0] == '--terseparse-debug': self._debug = True logging.getLogger().setLevel(logging.DEBUG) logging.basicConfig() args = args[1:]", "True logging.getLogger().setLevel(logging.DEBUG) logging.basicConfig() args = args[1:] parser = super(RootParser, self) ns = parser.parse_args(args,", "\"\"\"Lazily load a default argument after the args have been parsed\"\"\" def __init__(self,", "formatter.start_section(action_group.title) formatter.add_text(action_group.description) formatter.add_arguments(action_group._group_actions) formatter.end_section() # epilog formatter.add_text(self.epilog) # determine help from format above", "self._action_groups: if is_subparser(action_group): continue formatter.start_section(action_group.title) formatter.add_text(action_group.description) formatter.add_arguments(action_group._group_actions) formatter.end_section() # epilog 
formatter.add_text(self.epilog) # determine", "self.val(parsed_args_namespace) return self.val class ParsedArgsNamespace(object): def __init__(self, keywords, defaults): self._keywords = keywords self._defaults", "import namedtuple, OrderedDict def is_subparser(action_group): for a in action_group._group_actions: if isinstance(a, _SubParsersAction): return", "%r)' % (self._keywords, self._defaults) class ParsedArgs(object): def __init__(self, keywords, defaults): self.ns = ParsedArgsNamespace(keywords,", "= set(list(self._keywords.keys()) + list(self._defaults.keys())) def __getattr__(self, key): if key not in self._fields: raise", "print(title) print('=' * len(title)) print(header) print(msg) class RootParser(ArgumentParser): \"\"\"Private Class.\"\"\" @staticmethod def add_parser(*args,", "formatter.format_help() def parse_args(self, args=None, namespace=None, defaults=None): if not args: args = sys.argv[1:] if", "header = hfmt.format('arg', arg_len, 'value') + '\\n' header += hfmt.format('---', arg_len, '-----') print(title)", "return RootParser(*args, **kwargs) def __init__(self, *args, **kwargs): kwargs['formatter_class'] = CustomHelpFormatter super(RootParser, self).__init__(*args, **kwargs)", "RawTextHelpFormatter, _SubParsersAction, SUPPRESS from collections import namedtuple, OrderedDict def is_subparser(action_group): for a in", "= val def __call__(self, parsed_args_namespace): if callable(self.val): return self.val(parsed_args_namespace) return self.val class ParsedArgsNamespace(object):", "return self.val(parsed_args_namespace) return self.val class ParsedArgsNamespace(object): def __init__(self, keywords, defaults): self._keywords = keywords", "{} self._fields = set(list(self._keywords.keys()) + list(self._defaults.keys())) def __getattr__(self, key): if key not in", "object has not attribute %r' % (type(self), key)) val = self._keywords.get(key) if val", "self._get_formatter() # usage formatter.add_usage(self.usage, self._actions, self._mutually_exclusive_groups) # description formatter.add_text(self.description) # positionals, optionals and", "Args: val -- if object is callable, it should take one parameter. 
The", "= ParsedArgsNamespace(keywords, defaults) def names(self): return self.ns._fields def pprint(self): spacer = ' '", "if not args: args = sys.argv[1:] if len(args) > 0 and args[0] ==", "\"\"\"Overrides format_help to not print subparsers\"\"\" formatter = self._get_formatter() # usage formatter.add_usage(self.usage, self._actions,", "namespace=None, defaults=None): if not args: args = sys.argv[1:] if len(args) > 0 and", "tb = sys.exc_info() if tb: pdb.post_mortem(tb) else: pdb.set_trace() self.print_usage(sys.stderr) self.exit(2, ('\\nERROR: {}\\n').format(message)) def", "self._debug = True logging.getLogger().setLevel(logging.DEBUG) logging.basicConfig() args = args[1:] parser = super(RootParser, self) ns", "formatter.add_text(self.description) # positionals, optionals and user-defined groups, except SubParsers for action_group in self._action_groups:", "self._actions, self._mutually_exclusive_groups) # description formatter.add_text(self.description) # positionals, optionals and user-defined groups, except SubParsers", "defaults=None): if not args: args = sys.argv[1:] if len(args) > 0 and args[0]", "self.val class ParsedArgsNamespace(object): def __init__(self, keywords, defaults): self._keywords = keywords self._defaults = defaults", "pdb.post_mortem(tb) else: pdb.set_trace() self.print_usage(sys.stderr) self.exit(2, ('\\nERROR: {}\\n').format(message)) def format_help(self): \"\"\"Overrides format_help to not", "<reponame>jthacker/terseparse import sys import logging from argparse import ArgumentParser, RawTextHelpFormatter, _SubParsersAction, SUPPRESS from", "= sys.exc_info() if tb: pdb.post_mortem(tb) else: pdb.set_trace() self.print_usage(sys.stderr) self.exit(2, ('\\nERROR: {}\\n').format(message)) def format_help(self):", "object is callable, it should take one parameter. The arguments namespace object is", "= val(self) self._keywords[key] = val return val val = self._defaults.get(key) if val is", "% (self._keywords, self._defaults) class ParsedArgs(object): def __init__(self, keywords, defaults): self.ns = ParsedArgsNamespace(keywords, defaults)", "spacer = ' ' * 4 names = self.names() arg_len = max(3, max(map(len,", "= self._keywords.get(key) if val is not None: if isinstance(val, Lazy): val = val(self)", "**kwargs): return RootParser(*args, **kwargs) def __init__(self, *args, **kwargs): kwargs['formatter_class'] = CustomHelpFormatter super(RootParser, self).__init__(*args,", "self.ns = ParsedArgsNamespace(keywords, defaults) def names(self): return self.ns._fields def pprint(self): spacer = '", "pdb _, _, tb = sys.exc_info() if tb: pdb.post_mortem(tb) else: pdb.set_trace() self.print_usage(sys.stderr) self.exit(2,", "def __init__(self, val): \"\"\"Initialize a lazy object. Args: val -- if object is", "= max(3, max(map(len, names))) hfmt = '{:{}}'+spacer+'{}' lfmt = '{:{}}'+spacer+'{!r}' msg = '\\n'.join(lfmt.format(name,", "to not print subparsers\"\"\" formatter = self._get_formatter() # usage formatter.add_usage(self.usage, self._actions, self._mutually_exclusive_groups) #", "import sys import logging from argparse import ArgumentParser, RawTextHelpFormatter, _SubParsersAction, SUPPRESS from collections", "The arguments namespace object is passed if callable. 
\"\"\" self.val = val def", "self._debug = False def error(self, message): \"\"\"Overrides error to control printing output\"\"\" if", "if callable(self.val): return self.val(parsed_args_namespace) return self.val class ParsedArgsNamespace(object): def __init__(self, keywords, defaults): self._keywords", "self._defaults = defaults or {} self._fields = set(list(self._keywords.keys()) + list(self._defaults.keys())) def __getattr__(self, key):", "have been parsed\"\"\" def __init__(self, val): \"\"\"Initialize a lazy object. Args: val --", "and args[0] == '--terseparse-debug': self._debug = True logging.getLogger().setLevel(logging.DEBUG) logging.basicConfig() args = args[1:] parser", "False class CustomHelpFormatter(RawTextHelpFormatter): def __init__(self, *args, **kwargs): super(CustomHelpFormatter, self).__init__(*args, **kwargs) self._action_max_length = 10", "key): if key not in self._fields: raise AttributeError('%r object has not attribute %r'", "formatter.end_section() # epilog formatter.add_text(self.epilog) # determine help from format above return formatter.format_help() def", "return val def __contains__(self, key): return key in self._fields def __getitem__(self, key): return", "keywords, defaults): self._keywords = keywords self._defaults = defaults or {} self._fields = set(list(self._keywords.keys())", "a in action_group._group_actions: if isinstance(a, _SubParsersAction): return True return False class CustomHelpFormatter(RawTextHelpFormatter): def", "= ' ' * 4 names = self.names() arg_len = max(3, max(map(len, names)))", "def __getattr__(self, key): if key not in self._fields: raise AttributeError('%r object has not", "name in names) title = 'Parsed Arguments:' header = hfmt.format('arg', arg_len, 'value') +", "sys.exc_info() if tb: pdb.post_mortem(tb) else: pdb.set_trace() self.print_usage(sys.stderr) self.exit(2, ('\\nERROR: {}\\n').format(message)) def format_help(self): \"\"\"Overrides", "= self._get_formatter() # usage formatter.add_usage(self.usage, self._actions, self._mutually_exclusive_groups) # description formatter.add_text(self.description) # positionals, optionals", "from argparse import ArgumentParser, RawTextHelpFormatter, _SubParsersAction, SUPPRESS from collections import namedtuple, OrderedDict def", "arg_len, self.ns[name]) for name in names) title = 'Parsed Arguments:' header = hfmt.format('arg',", "for k in self._fields) def __dir__(self): return sorted(set(dir(type(self)) + self._fields)) def __repr__(self): return", "ParsedArgsNamespace(keywords, defaults) def names(self): return self.ns._fields def pprint(self): spacer = ' ' *", "if val is not None: if isinstance(val, Lazy): val = val(self) self._keywords[key] =", "val = val(self) self._keywords[key] = val return val def __contains__(self, key): return key", "CustomHelpFormatter super(RootParser, self).__init__(*args, **kwargs) self._debug = False def error(self, message): \"\"\"Overrides error to", "for a in action_group._group_actions: if isinstance(a, _SubParsersAction): return True return False class CustomHelpFormatter(RawTextHelpFormatter):", "val def __call__(self, parsed_args_namespace): if callable(self.val): return self.val(parsed_args_namespace) return self.val class ParsedArgsNamespace(object): def", "%r' % (type(self), key)) val = self._keywords.get(key) if val is not None: if", "if isinstance(a, _SubParsersAction): return True return False class CustomHelpFormatter(RawTextHelpFormatter): def __init__(self, *args, **kwargs):", "in self._action_groups: if is_subparser(action_group): 
continue formatter.start_section(action_group.title) formatter.add_text(action_group.description) formatter.add_arguments(action_group._group_actions) formatter.end_section() # epilog formatter.add_text(self.epilog) #", "sys.argv[1:] if len(args) > 0 and args[0] == '--terseparse-debug': self._debug = True logging.getLogger().setLevel(logging.DEBUG)", "one parameter. The arguments namespace object is passed if callable. \"\"\" self.val =", "names = self.names() arg_len = max(3, max(map(len, names))) hfmt = '{:{}}'+spacer+'{}' lfmt =", "callable(val): val = val(self) self._keywords[key] = val return val def __contains__(self, key): return", "val is not None: if isinstance(val, Lazy): val = val(self) self._keywords[key] = val", "is not None: if callable(val): val = val(self) self._keywords[key] = val return val", "except SubParsers for action_group in self._action_groups: if is_subparser(action_group): continue formatter.start_section(action_group.title) formatter.add_text(action_group.description) formatter.add_arguments(action_group._group_actions) formatter.end_section()", "val return val val = self._defaults.get(key) if val is not None: if callable(val):", "\"\"\"Private Class.\"\"\" @staticmethod def add_parser(*args, **kwargs): return RootParser(*args, **kwargs) def __init__(self, *args, **kwargs):", "\"\"\" self.val = val def __call__(self, parsed_args_namespace): if callable(self.val): return self.val(parsed_args_namespace) return self.val", "self._fields)) def __repr__(self): return 'ParsedArgsNamespace(%r, %r)' % (self._keywords, self._defaults) class ParsedArgs(object): def __init__(self,", "isinstance(a, _SubParsersAction): return True return False class CustomHelpFormatter(RawTextHelpFormatter): def __init__(self, *args, **kwargs): super(CustomHelpFormatter,", "True return False class CustomHelpFormatter(RawTextHelpFormatter): def __init__(self, *args, **kwargs): super(CustomHelpFormatter, self).__init__(*args, **kwargs) self._action_max_length", "self.ns[name]) for name in names) title = 'Parsed Arguments:' header = hfmt.format('arg', arg_len,", "output\"\"\" if self._debug: import pdb _, _, tb = sys.exc_info() if tb: pdb.post_mortem(tb)", "Lazy): val = val(self) self._keywords[key] = val return val val = self._defaults.get(key) if", "self[k]) for k in self._fields) def __dir__(self): return sorted(set(dir(type(self)) + self._fields)) def __repr__(self):", "val(self) self._keywords[key] = val return val def __contains__(self, key): return key in self._fields", "pprint(self): spacer = ' ' * 4 names = self.names() arg_len = max(3,", "args[1:] parser = super(RootParser, self) ns = parser.parse_args(args, namespace) parsed_args = ParsedArgs(OrderedDict(ns._get_kwargs()), defaults)", "self._fields) def __dir__(self): return sorted(set(dir(type(self)) + self._fields)) def __repr__(self): return 'ParsedArgsNamespace(%r, %r)' %", "keywords, defaults): self.ns = ParsedArgsNamespace(keywords, defaults) def names(self): return self.ns._fields def pprint(self): spacer", "has not attribute %r' % (type(self), key)) val = self._keywords.get(key) if val is", "hfmt = '{:{}}'+spacer+'{}' lfmt = '{:{}}'+spacer+'{!r}' msg = '\\n'.join(lfmt.format(name, arg_len, self.ns[name]) for name", "# determine help from format above return formatter.format_help() def parse_args(self, args=None, namespace=None, defaults=None):", "for name in names) title = 'Parsed Arguments:' header = hfmt.format('arg', arg_len, 'value')", "= val return val val = self._defaults.get(key) if val is not None: if", 
"\"\"\"Overrides error to control printing output\"\"\" if self._debug: import pdb _, _, tb", "add_parser(*args, **kwargs): return RootParser(*args, **kwargs) def __init__(self, *args, **kwargs): kwargs['formatter_class'] = CustomHelpFormatter super(RootParser,", "*args, **kwargs): super(CustomHelpFormatter, self).__init__(*args, **kwargs) self._action_max_length = 10 class Lazy(object): \"\"\"Lazily load a", "'ParsedArgsNamespace(%r, %r)' % (self._keywords, self._defaults) class ParsedArgs(object): def __init__(self, keywords, defaults): self.ns =", "return self.__getattr__(key) def __iter__(self): return iter((k, self[k]) for k in self._fields) def __dir__(self):", "print(header) print(msg) class RootParser(ArgumentParser): \"\"\"Private Class.\"\"\" @staticmethod def add_parser(*args, **kwargs): return RootParser(*args, **kwargs)", "tb: pdb.post_mortem(tb) else: pdb.set_trace() self.print_usage(sys.stderr) self.exit(2, ('\\nERROR: {}\\n').format(message)) def format_help(self): \"\"\"Overrides format_help to", "def __init__(self, keywords, defaults): self._keywords = keywords self._defaults = defaults or {} self._fields", "def __contains__(self, key): return key in self._fields def __getitem__(self, key): return self.__getattr__(key) def", "_SubParsersAction): return True return False class CustomHelpFormatter(RawTextHelpFormatter): def __init__(self, *args, **kwargs): super(CustomHelpFormatter, self).__init__(*args,", "val val = self._defaults.get(key) if val is not None: if callable(val): val =", "if isinstance(val, Lazy): val = val(self) self._keywords[key] = val return val val =", "return self.ns._fields def pprint(self): spacer = ' ' * 4 names = self.names()", "= keywords self._defaults = defaults or {} self._fields = set(list(self._keywords.keys()) + list(self._defaults.keys())) def", "val is not None: if callable(val): val = val(self) self._keywords[key] = val return", "def __dir__(self): return sorted(set(dir(type(self)) + self._fields)) def __repr__(self): return 'ParsedArgsNamespace(%r, %r)' % (self._keywords,", "positionals, optionals and user-defined groups, except SubParsers for action_group in self._action_groups: if is_subparser(action_group):", "'--terseparse-debug': self._debug = True logging.getLogger().setLevel(logging.DEBUG) logging.basicConfig() args = args[1:] parser = super(RootParser, self)", "sys import logging from argparse import ArgumentParser, RawTextHelpFormatter, _SubParsersAction, SUPPRESS from collections import", "def __iter__(self): return iter((k, self[k]) for k in self._fields) def __dir__(self): return sorted(set(dir(type(self))", "self.ns._fields def pprint(self): spacer = ' ' * 4 names = self.names() arg_len", "self._keywords = keywords self._defaults = defaults or {} self._fields = set(list(self._keywords.keys()) + list(self._defaults.keys()))", "default argument after the args have been parsed\"\"\" def __init__(self, val): \"\"\"Initialize a", "print subparsers\"\"\" formatter = self._get_formatter() # usage formatter.add_usage(self.usage, self._actions, self._mutually_exclusive_groups) # description formatter.add_text(self.description)", "Class.\"\"\" @staticmethod def add_parser(*args, **kwargs): return RootParser(*args, **kwargs) def __init__(self, *args, **kwargs): kwargs['formatter_class']", "ArgumentParser, RawTextHelpFormatter, _SubParsersAction, SUPPRESS from collections import namedtuple, OrderedDict def is_subparser(action_group): for a", "callable. 
def is_subparser(action_group):
    for a in action_group._group_actions:
        if isinstance(a, _SubParsersAction):
            return True
    return False


class CustomHelpFormatter(RawTextHelpFormatter):
    def __init__(self, *args, **kwargs):
        super(CustomHelpFormatter, self).__init__(*args, **kwargs)
        self._action_max_length = 10


class Lazy(object):
    """Lazily load a default argument after the args have been parsed"""
    def __init__(self, val):
        """Initialize a lazy object.
        Args:
            val -- if object is callable, it should take one parameter.
                The arguments namespace object is passed if callable.
        """
        self.val = val

    def __call__(self, parsed_args_namespace):
        if callable(self.val):
            return self.val(parsed_args_namespace)
        return self.val


class ParsedArgsNamespace(object):
    def __init__(self, keywords, defaults):
        self._keywords = keywords
        self._defaults = defaults or {}
        self._fields = set(list(self._keywords.keys()) + list(self._defaults.keys()))

    def __getattr__(self, key):
        if key not in self._fields:
            raise AttributeError('%r object has not attribute %r' % (type(self), key))
        val = self._keywords.get(key)
        if val is not None:
            if isinstance(val, Lazy):
                val = val(self)
                self._keywords[key] = val
            return val
        val = self._defaults.get(key)
        if val is not None:
            if callable(val):
                val = val(self)
                self._keywords[key] = val
            return val

    def __contains__(self, key):
        return key in self._fields

    def __getitem__(self, key):
        return self.__getattr__(key)

    def __iter__(self):
        return iter((k, self[k]) for k in self._fields)

    def __dir__(self):
        return sorted(set(dir(type(self)) + self._fields))

    def __repr__(self):
        return 'ParsedArgsNamespace(%r, %r)' % (self._keywords, self._defaults)


class ParsedArgs(object):
    def __init__(self, keywords, defaults):
        self.ns = ParsedArgsNamespace(keywords, defaults)

    def names(self):
        return self.ns._fields

    def pprint(self):
        spacer = ' ' * 4
        names = self.names()
        arg_len = max(3, max(map(len, names)))
        hfmt = '{:{}}' + spacer + '{}'
        lfmt = '{:{}}' + spacer + '{!r}'
        msg = '\n'.join(lfmt.format(name, arg_len, self.ns[name]) for name in names)
        title = 'Parsed Arguments:'
        header = hfmt.format('arg', arg_len, 'value') + '\n'
        header += hfmt.format('---', arg_len, '-----')
        print(title)
        print('=' * len(title))
        print(header)
        print(msg)


class RootParser(ArgumentParser):
    """Private Class."""

    @staticmethod
    def add_parser(*args, **kwargs):
        return RootParser(*args, **kwargs)

    def __init__(self, *args, **kwargs):
        kwargs['formatter_class'] = CustomHelpFormatter
        super(RootParser, self).__init__(*args, **kwargs)
        self._debug = False

    def error(self, message):
        """Overrides error to control printing output"""
        if self._debug:
            import pdb
            _, _, tb = sys.exc_info()
            if tb:
                pdb.post_mortem(tb)
            else:
                pdb.set_trace()
        self.print_usage(sys.stderr)
        self.exit(2, ('\nERROR: {}\n').format(message))

    def format_help(self):
        """Overrides format_help to not print subparsers"""
        formatter = self._get_formatter()
        # usage
        formatter.add_usage(self.usage, self._actions, self._mutually_exclusive_groups)
        # description
        formatter.add_text(self.description)
        # positionals, optionals and user-defined groups, except SubParsers
        for action_group in self._action_groups:
            if is_subparser(action_group):
                continue
            formatter.start_section(action_group.title)
            formatter.add_text(action_group.description)
            formatter.add_arguments(action_group._group_actions)
            formatter.end_section()
        # epilog
        formatter.add_text(self.epilog)
        # determine help from format above
        return formatter.format_help()

    def parse_args(self, args=None, namespace=None, defaults=None):
        if not args:
            args = sys.argv[1:]
        if len(args) > 0 and args[0] == '--terseparse-debug':
            self._debug = True
            logging.getLogger().setLevel(logging.DEBUG)
            logging.basicConfig()
            args = args[1:]
        parser = super(RootParser, self)
        ns = parser.parse_args(args, namespace)
        parsed_args = ParsedArgs(OrderedDict(ns._get_kwargs()), defaults)
        if self._debug:
            parsed_args.pprint()
        return parser, parsed_args
"""PyTautulliApiHomeStats."""
from __future__ import annotations

from pytautulli.models.user import PyTautulliApiUser

from .base import APIResponseType, PyTautulliApiBaseModel


class PyTautulliApiHomeStatsRow(PyTautulliApiUser, PyTautulliApiBaseModel):
    """PyTautulliApiHomeStatsRow"""

    _responsetype = APIResponseType.DICT

    art: str | None = None
    content_rating: str | None = None
    count: int | None = None
    friendly_name: str | None = None
    grandparent_rating_key: str | None = None
    grandparent_thumb: str | None = None
    guid: str | None = None
    labels: list[str] | None = None
    last_play: str | None = None
    live: bool | None = None
    media_type: str | None = None
    platform: str | None = None
    platform_name: str | None = None
    rating_key: int | None = None
    row_id: int | None = None
    section_id: int | None = None
    started: str | None = None
    stopped: str | None = None
    thumb: str | None = None
    title: str | None = None
    total_duration: int | None = None
    total_plays: int | None = None
    user: str | None = None
    users_watched: str | None = None
    year: int | None = None


class PyTautulliApiHomeStats(PyTautulliApiBaseModel):
    """PyTautulliApiHomeStats."""

    _responsetype = APIResponseType.LIST

    stat_id: str | None = None
    stat_type: str | None = None
    stat_title: str | None = None
    rows: list[PyTautulliApiHomeStatsRow] = None

    def __post_init__(self):
        super().__post_init__()
        self.rows = [PyTautulliApiHomeStatsRow(row) for row in self.rows or []]
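# --- Usage sketch (illustrative) ---
# Assumption: PyTautulliApiBaseModel accepts the raw API dict, which is what the
# __post_init__ above relies on when it wraps each entry of "rows"; the sample
# payload below is made up for demonstration only.
sample = {
    "stat_id": "top_movies",
    "stat_type": "total_plays",
    "stat_title": "Most Watched Movies",
    "rows": [{"title": "Example Movie", "total_plays": 3, "year": 2021}],
}
stats = PyTautulliApiHomeStats(sample)
print(stats.stat_id)        # "top_movies"
print(stats.rows[0].title)  # "Example Movie"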
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 21 15:05:24 2018

@author: Hendry
"""
from read_data import *
from TokenizeSentences import *
import numpy as np


def onehot(data, nClass):
    data2 = np.zeros([len(data), nClass])
    for i in range(nClass):
        data2[np.where(data == i), i] = 1
    return data2


def get_text_idx(text, vocab, max_document_length):
    text_array = np.zeros([len(text), max_document_length], dtype=np.int32)
    for i, x in enumerate(text):
        words = x
        for j, w in enumerate(words):
            if w in vocab:
                text_array[i, j] = vocab[w]
            else:
                text_array[i, j] = vocab['the']
    return text_array


def loaddata(w2v_model, typeOfClassify=0, useTextsum=1):
    train_bodies = readRawData('train_bodies.csv')
    if useTextsum == 0:
        trainDocs = TokenizeSentences(splitData(train_bodies, 1))
    else:
        f = open('./fnc_data/train_1.txt', 'r')
        data = f.readlines()
        f.close()
        trainDocs = TokenizeSentences(data)
    trainDocsIdx = np.array(splitData(train_bodies, 0)).astype('int')
    train_stances = readRawData('train_stances.csv')
    trainTitle = TokenizeSentences(splitData(train_stances, 0))
    trainTitleIdx = np.array(splitData(train_stances, 1)).astype('int')
    trainRes = np.array(splitData(train_stances, 2))
    trainRes[np.where(trainRes == 'unrelated')] = '0'
    trainRes[np.where(trainRes == 'agree')] = '1'
    trainRes[np.where(trainRes == 'disagree')] = '2'
    trainRes[np.where(trainRes == 'discuss')] = '3'
    trainRes = trainRes.astype('int')
    maxDocLength = 0
    for i in range(len(trainDocs)):
        maxDocLength = max(maxDocLength, len(trainDocs[i]))
    maxTitleLength = 0
    for i in range(len(trainTitle)):
        maxTitleLength = max(maxTitleLength, len(trainTitle[i]))
    trainDocs = get_text_idx(trainDocs, w2v_model.vocab_hash, maxDocLength)
    trainTitle = get_text_idx(trainTitle, w2v_model.vocab_hash, maxTitleLength)
    trainTitleDocs = [[] for i in range(len(trainTitle))]
    for i in range(len(trainTitle)):
        idx = np.where(trainDocsIdx == trainTitleIdx[i])
        trainTitleDocs[i] = trainDocs[int(idx[0])]
    trainTitleDocs = np.array(trainTitleDocs)
    trainDocs = np.array(trainDocs)
    trainTitle = np.array(trainTitle)
    uniIdx = np.unique(trainTitleIdx)
    uniIdxTest = uniIdx[round(0.95 * len(uniIdx)):]
    validIdx = np.argwhere(trainTitleIdx == uniIdxTest[0])
    for i in range(len(uniIdxTest) - 1):
        validIdx = np.append(validIdx, np.argwhere(trainTitleIdx == uniIdxTest[i + 1]))
    validIdx = sorted(validIdx)
    fullIdx = list(range(len(trainTitleIdx)))
    trainIdx = list(set(fullIdx).difference(set(validIdx)))
    x1Train = trainTitleDocs[trainIdx]
    x2Train = trainTitle[trainIdx]
    trainRes = np.array(trainRes)
    y0Train = trainRes[trainIdx]
    x1Valid = trainTitleDocs[validIdx]
    x2Valid = trainTitle[validIdx]
    y0Valid = trainRes[validIdx]
    if typeOfClassify == 0:
        yValid = onehot(y0Valid, 4)
        yTrain = onehot(y0Train, 4)
    elif typeOfClassify == 1:
        y0Train[y0Train > 0] = 1
        y0Valid[y0Valid > 0] = 1
        yValid = onehot(y0Valid, 2)
        yTrain = onehot(y0Train, 2)
    elif typeOfClassify == 2:
        x1Train = x1Train[y0Train > 0]
        x2Train = x2Train[y0Train > 0]
        y0Train = y0Train[y0Train > 0] - 1
        x1Valid = x1Valid[y0Valid > 0]
        x2Valid = x2Valid[y0Valid > 0]
        y0Valid = y0Valid[y0Valid > 0] - 1
        yValid = onehot(y0Valid, 3)
        yTrain = onehot(y0Train, 3)
    vocab_size = len(w2v_model.vocab_hash)
    return x1Train, x1Valid, x2Train, x2Valid, yTrain, yValid, vocab_size
trainRes[np.where(trainRes=='disagree')]='2' trainRes[np.where(trainRes=='discuss')]='3' trainRes", "max_document_length],dtype=np.int32) for i,x in enumerate(text): words = x for j, w in enumerate(words):", "uniIdx = np.unique(trainTitleIdx) uniIdxTest = uniIdx[round(0.95*len(uniIdx)):] validIdx = np.argwhere(trainTitleIdx == uniIdxTest[0]) for i", "= uniIdx[round(0.95*len(uniIdx)):] validIdx = np.argwhere(trainTitleIdx == uniIdxTest[0]) for i in range(len(uniIdxTest)-1): validIdx =", "vocab: text_array[i, j] = vocab[w] else : text_array[i, j] = vocab['the'] return text_array", "maxDocLength = 0 for i in range(len(trainDocs)): maxDocLength = max(maxDocLength,len(trainDocs[i])) maxTitleLength = 0", "y0Train[y0Train>0]-1 x1Valid = x1Valid[y0Valid>0] x2Valid = x2Valid[y0Valid>0] y0Valid = y0Valid[y0Valid>0]-1 yValid = onehot(y0Valid,3)", "TokenizeSentences(splitData(train_bodies,1)) else: f = open('./fnc_data/train_1.txt','r') data = f.readlines() f.close() trainDocs = TokenizeSentences(data) trainDocsIdx", "trainTitleDocs[validIdx] x2Valid = trainTitle[validIdx] y0Valid = trainRes[validIdx] if typeOfClassify==0: yValid = onehot(y0Valid,4) yTrain", "x2Train = x2Train[y0Train>0] y0Train = y0Train[y0Train>0]-1 x1Valid = x1Valid[y0Valid>0] x2Valid = x2Valid[y0Valid>0] y0Valid", "validIdx = sorted(validIdx) fullIdx = list(range(len(trainTitleIdx))) trainIdx = list(set(fullIdx).difference(set(validIdx))) x1Train = trainTitleDocs[trainIdx] x2Train", "2018 @author: Hendry \"\"\" from read_data import * from TokenizeSentences import * import", "1): train_bodies = readRawData('train_bodies.csv') if useTextsum == 0: trainDocs = TokenizeSentences(splitData(train_bodies,1)) else: f", "trainTitleDocs[trainIdx] x2Train = trainTitle[trainIdx] trainRes = np.array(trainRes) y0Train = trainRes[trainIdx] x1Valid = trainTitleDocs[validIdx]", "1 return data2 def get_text_idx(text,vocab,max_document_length): text_array = np.zeros([len(text), max_document_length],dtype=np.int32) for i,x in enumerate(text):", "in range(nClass): data2[np.where(data==i),i]= 1 return data2 def get_text_idx(text,vocab,max_document_length): text_array = np.zeros([len(text), max_document_length],dtype=np.int32) for", "= 0 for i in range(len(trainDocs)): maxDocLength = max(maxDocLength,len(trainDocs[i])) maxTitleLength = 0 for", "= get_text_idx(trainTitle,w2v_model.vocab_hash,maxTitleLength) trainTitleDocs = [[] for i in range(len(trainTitle))] for i in range(len(trainTitle)):", "trainTitle = get_text_idx(trainTitle,w2v_model.vocab_hash,maxTitleLength) trainTitleDocs = [[] for i in range(len(trainTitle))] for i in", "uniIdx[round(0.95*len(uniIdx)):] validIdx = np.argwhere(trainTitleIdx == uniIdxTest[0]) for i in range(len(uniIdxTest)-1): validIdx = np.append(validIdx,np.argwhere(trainTitleIdx", "readRawData('train_bodies.csv') if useTextsum == 0: trainDocs = TokenizeSentences(splitData(train_bodies,1)) else: f = open('./fnc_data/train_1.txt','r') data", "trainIdx = list(set(fullIdx).difference(set(validIdx))) x1Train = trainTitleDocs[trainIdx] x2Train = trainTitle[trainIdx] trainRes = np.array(trainRes) y0Train", "yTrain = onehot(y0Train,2) elif typeOfClassify==2: x1Train = x1Train[y0Train>0] x2Train = x2Train[y0Train>0] y0Train =", "np.zeros([len(data),nClass]) for i in range(nClass): data2[np.where(data==i),i]= 1 return data2 def get_text_idx(text,vocab,max_document_length): text_array =", "= onehot(y0Train,2) elif typeOfClassify==2: x1Train = x1Train[y0Train>0] x2Train = x2Train[y0Train>0] y0Train = y0Train[y0Train>0]-1", 
"sorted(validIdx) fullIdx = list(range(len(trainTitleIdx))) trainIdx = list(set(fullIdx).difference(set(validIdx))) x1Train = trainTitleDocs[trainIdx] x2Train = trainTitle[trainIdx]", "y0Valid = y0Valid[y0Valid>0]-1 yValid = onehot(y0Valid,3) yTrain = onehot(y0Train,3) vocab_size = len(w2v_model.vocab_hash) return", "j, w in enumerate(words): if w in vocab: text_array[i, j] = vocab[w] else", "= [[] for i in range(len(trainTitle))] for i in range(len(trainTitle)): idx = np.where(trainDocsIdx==trainTitleIdx[i])", "= y0Train[y0Train>0]-1 x1Valid = x1Valid[y0Valid>0] x2Valid = x2Valid[y0Valid>0] y0Valid = y0Valid[y0Valid>0]-1 yValid =", "enumerate(words): if w in vocab: text_array[i, j] = vocab[w] else : text_array[i, j]", "for i in range(len(trainTitle)): maxTitleLength = max(maxTitleLength,len(trainTitle[i])) trainDocs = get_text_idx(trainDocs,w2v_model.vocab_hash,maxDocLength) trainTitle = get_text_idx(trainTitle,w2v_model.vocab_hash,maxTitleLength)", "from read_data import * from TokenizeSentences import * import numpy as np def", "maxTitleLength = 0 for i in range(len(trainTitle)): maxTitleLength = max(maxTitleLength,len(trainTitle[i])) trainDocs = get_text_idx(trainDocs,w2v_model.vocab_hash,maxDocLength)", "= x1Train[y0Train>0] x2Train = x2Train[y0Train>0] y0Train = y0Train[y0Train>0]-1 x1Valid = x1Valid[y0Valid>0] x2Valid =", "x2Train = trainTitle[trainIdx] trainRes = np.array(trainRes) y0Train = trainRes[trainIdx] x1Valid = trainTitleDocs[validIdx] x2Valid", "for i in range(nClass): data2[np.where(data==i),i]= 1 return data2 def get_text_idx(text,vocab,max_document_length): text_array = np.zeros([len(text),", "0,useTextsum= 1): train_bodies = readRawData('train_bodies.csv') if useTextsum == 0: trainDocs = TokenizeSentences(splitData(train_bodies,1)) else:", "f.close() trainDocs = TokenizeSentences(data) trainDocsIdx = np.array(splitData(train_bodies,0)).astype('int') train_stances = readRawData('train_stances.csv') trainTitle = TokenizeSentences(splitData(train_stances,0))", "if useTextsum == 0: trainDocs = TokenizeSentences(splitData(train_bodies,1)) else: f = open('./fnc_data/train_1.txt','r') data =", "uniIdxTest[0]) for i in range(len(uniIdxTest)-1): validIdx = np.append(validIdx,np.argwhere(trainTitleIdx == uniIdxTest[i+1])) validIdx = sorted(validIdx)", "get_text_idx(trainTitle,w2v_model.vocab_hash,maxTitleLength) trainTitleDocs = [[] for i in range(len(trainTitle))] for i in range(len(trainTitle)): idx", "f = open('./fnc_data/train_1.txt','r') data = f.readlines() f.close() trainDocs = TokenizeSentences(data) trainDocsIdx = np.array(splitData(train_bodies,0)).astype('int')", "trainDocs = get_text_idx(trainDocs,w2v_model.vocab_hash,maxDocLength) trainTitle = get_text_idx(trainTitle,w2v_model.vocab_hash,maxTitleLength) trainTitleDocs = [[] for i in range(len(trainTitle))]", "np.unique(trainTitleIdx) uniIdxTest = uniIdx[round(0.95*len(uniIdx)):] validIdx = np.argwhere(trainTitleIdx == uniIdxTest[0]) for i in range(len(uniIdxTest)-1):", "TokenizeSentences(splitData(train_stances,0)) trainTitleIdx = np.array(splitData(train_stances,1)).astype('int') trainRes = np.array(splitData(train_stances,2)) trainRes[np.where(trainRes=='unrelated')]='0' trainRes[np.where(trainRes=='agree')]='1' trainRes[np.where(trainRes=='disagree')]='2' trainRes[np.where(trainRes=='discuss')]='3' trainRes =trainRes.astype('int')", "get_text_idx(trainDocs,w2v_model.vocab_hash,maxDocLength) trainTitle = get_text_idx(trainTitle,w2v_model.vocab_hash,maxTitleLength) trainTitleDocs = [[] for i in 
range(len(trainTitle))] for i", "max(maxDocLength,len(trainDocs[i])) maxTitleLength = 0 for i in range(len(trainTitle)): maxTitleLength = max(maxTitleLength,len(trainTitle[i])) trainDocs =", "= np.unique(trainTitleIdx) uniIdxTest = uniIdx[round(0.95*len(uniIdx)):] validIdx = np.argwhere(trainTitleIdx == uniIdxTest[0]) for i in", "== uniIdxTest[i+1])) validIdx = sorted(validIdx) fullIdx = list(range(len(trainTitleIdx))) trainIdx = list(set(fullIdx).difference(set(validIdx))) x1Train =", "on Sun Jan 21 15:05:24 2018 @author: Hendry \"\"\" from read_data import *", "maxTitleLength = max(maxTitleLength,len(trainTitle[i])) trainDocs = get_text_idx(trainDocs,w2v_model.vocab_hash,maxDocLength) trainTitle = get_text_idx(trainTitle,w2v_model.vocab_hash,maxTitleLength) trainTitleDocs = [[] for", "= np.argwhere(trainTitleIdx == uniIdxTest[0]) for i in range(len(uniIdxTest)-1): validIdx = np.append(validIdx,np.argwhere(trainTitleIdx == uniIdxTest[i+1]))", "y0Valid = trainRes[validIdx] if typeOfClassify==0: yValid = onehot(y0Valid,4) yTrain = onehot(y0Train,4) elif typeOfClassify==1:", "== 0: trainDocs = TokenizeSentences(splitData(train_bodies,1)) else: f = open('./fnc_data/train_1.txt','r') data = f.readlines() f.close()", "y0Valid[y0Valid>0]=1 yValid = onehot(y0Valid,2) yTrain = onehot(y0Train,2) elif typeOfClassify==2: x1Train = x1Train[y0Train>0] x2Train", "x2Train[y0Train>0] y0Train = y0Train[y0Train>0]-1 x1Valid = x1Valid[y0Valid>0] x2Valid = x2Valid[y0Valid>0] y0Valid = y0Valid[y0Valid>0]-1", "j] = vocab[w] else : text_array[i, j] = vocab['the'] return text_array def loaddata(w2v_model,typeOfClassify", "python3 # -*- coding: utf-8 -*- \"\"\" Created on Sun Jan 21 15:05:24", "words = x for j, w in enumerate(words): if w in vocab: text_array[i,", "x1Valid = trainTitleDocs[validIdx] x2Valid = trainTitle[validIdx] y0Valid = trainRes[validIdx] if typeOfClassify==0: yValid =", "trainDocs = TokenizeSentences(splitData(train_bodies,1)) else: f = open('./fnc_data/train_1.txt','r') data = f.readlines() f.close() trainDocs =", "= np.zeros([len(text), max_document_length],dtype=np.int32) for i,x in enumerate(text): words = x for j, w", "typeOfClassify==2: x1Train = x1Train[y0Train>0] x2Train = x2Train[y0Train>0] y0Train = y0Train[y0Train>0]-1 x1Valid = x1Valid[y0Valid>0]", "yTrain = onehot(y0Train,4) elif typeOfClassify==1: y0Train[y0Train>0]=1 y0Valid[y0Valid>0]=1 yValid = onehot(y0Valid,2) yTrain = onehot(y0Train,2)", "numpy as np def onehot(data,nClass): data2 = np.zeros([len(data),nClass]) for i in range(nClass): data2[np.where(data==i),i]=", "Created on Sun Jan 21 15:05:24 2018 @author: Hendry \"\"\" from read_data import", "= 0,useTextsum= 1): train_bodies = readRawData('train_bodies.csv') if useTextsum == 0: trainDocs = TokenizeSentences(splitData(train_bodies,1))", "np.array(trainDocs) trainTitle = np.array(trainTitle) uniIdx = np.unique(trainTitleIdx) uniIdxTest = uniIdx[round(0.95*len(uniIdx)):] validIdx = np.argwhere(trainTitleIdx", "= np.array(splitData(train_stances,1)).astype('int') trainRes = np.array(splitData(train_stances,2)) trainRes[np.where(trainRes=='unrelated')]='0' trainRes[np.where(trainRes=='agree')]='1' trainRes[np.where(trainRes=='disagree')]='2' trainRes[np.where(trainRes=='discuss')]='3' trainRes =trainRes.astype('int') maxDocLength =", "elif typeOfClassify==1: y0Train[y0Train>0]=1 y0Valid[y0Valid>0]=1 yValid = onehot(y0Valid,2) yTrain = onehot(y0Train,2) elif typeOfClassify==2: x1Train", "f.readlines() f.close() trainDocs = TokenizeSentences(data) trainDocsIdx = 
np.array(splitData(train_bodies,0)).astype('int') train_stances = readRawData('train_stances.csv') trainTitle =", "onehot(y0Valid,2) yTrain = onehot(y0Train,2) elif typeOfClassify==2: x1Train = x1Train[y0Train>0] x2Train = x2Train[y0Train>0] y0Train", "in range(len(trainTitle)): maxTitleLength = max(maxTitleLength,len(trainTitle[i])) trainDocs = get_text_idx(trainDocs,w2v_model.vocab_hash,maxDocLength) trainTitle = get_text_idx(trainTitle,w2v_model.vocab_hash,maxTitleLength) trainTitleDocs =", "* from TokenizeSentences import * import numpy as np def onehot(data,nClass): data2 =", "np.append(validIdx,np.argwhere(trainTitleIdx == uniIdxTest[i+1])) validIdx = sorted(validIdx) fullIdx = list(range(len(trainTitleIdx))) trainIdx = list(set(fullIdx).difference(set(validIdx))) x1Train", "@author: Hendry \"\"\" from read_data import * from TokenizeSentences import * import numpy", "= x for j, w in enumerate(words): if w in vocab: text_array[i, j]", "max(maxTitleLength,len(trainTitle[i])) trainDocs = get_text_idx(trainDocs,w2v_model.vocab_hash,maxDocLength) trainTitle = get_text_idx(trainTitle,w2v_model.vocab_hash,maxTitleLength) trainTitleDocs = [[] for i in", "if typeOfClassify==0: yValid = onehot(y0Valid,4) yTrain = onehot(y0Train,4) elif typeOfClassify==1: y0Train[y0Train>0]=1 y0Valid[y0Valid>0]=1 yValid", "= 0 for i in range(len(trainTitle)): maxTitleLength = max(maxTitleLength,len(trainTitle[i])) trainDocs = get_text_idx(trainDocs,w2v_model.vocab_hash,maxDocLength) trainTitle", "= sorted(validIdx) fullIdx = list(range(len(trainTitleIdx))) trainIdx = list(set(fullIdx).difference(set(validIdx))) x1Train = trainTitleDocs[trainIdx] x2Train =", "vocab[w] else : text_array[i, j] = vocab['the'] return text_array def loaddata(w2v_model,typeOfClassify = 0,useTextsum=", "trainRes = np.array(splitData(train_stances,2)) trainRes[np.where(trainRes=='unrelated')]='0' trainRes[np.where(trainRes=='agree')]='1' trainRes[np.where(trainRes=='disagree')]='2' trainRes[np.where(trainRes=='discuss')]='3' trainRes =trainRes.astype('int') maxDocLength = 0 for", "list(range(len(trainTitleIdx))) trainIdx = list(set(fullIdx).difference(set(validIdx))) x1Train = trainTitleDocs[trainIdx] x2Train = trainTitle[trainIdx] trainRes = np.array(trainRes)", "np.array(splitData(train_stances,1)).astype('int') trainRes = np.array(splitData(train_stances,2)) trainRes[np.where(trainRes=='unrelated')]='0' trainRes[np.where(trainRes=='agree')]='1' trainRes[np.where(trainRes=='disagree')]='2' trainRes[np.where(trainRes=='discuss')]='3' trainRes =trainRes.astype('int') maxDocLength = 0", "trainDocs = np.array(trainDocs) trainTitle = np.array(trainTitle) uniIdx = np.unique(trainTitleIdx) uniIdxTest = uniIdx[round(0.95*len(uniIdx)):] validIdx", "range(len(trainDocs)): maxDocLength = max(maxDocLength,len(trainDocs[i])) maxTitleLength = 0 for i in range(len(trainTitle)): maxTitleLength =", "data = f.readlines() f.close() trainDocs = TokenizeSentences(data) trainDocsIdx = np.array(splitData(train_bodies,0)).astype('int') train_stances = readRawData('train_stances.csv')", "readRawData('train_stances.csv') trainTitle = TokenizeSentences(splitData(train_stances,0)) trainTitleIdx = np.array(splitData(train_stances,1)).astype('int') trainRes = np.array(splitData(train_stances,2)) trainRes[np.where(trainRes=='unrelated')]='0' trainRes[np.where(trainRes=='agree')]='1' trainRes[np.where(trainRes=='disagree')]='2'", "typeOfClassify==1: y0Train[y0Train>0]=1 y0Valid[y0Valid>0]=1 yValid = onehot(y0Valid,2) yTrain = onehot(y0Train,2) elif 
typeOfClassify==2: x1Train =", "= onehot(y0Valid,2) yTrain = onehot(y0Train,2) elif typeOfClassify==2: x1Train = x1Train[y0Train>0] x2Train = x2Train[y0Train>0]", "in range(len(uniIdxTest)-1): validIdx = np.append(validIdx,np.argwhere(trainTitleIdx == uniIdxTest[i+1])) validIdx = sorted(validIdx) fullIdx = list(range(len(trainTitleIdx)))", "range(len(trainTitle)): maxTitleLength = max(maxTitleLength,len(trainTitle[i])) trainDocs = get_text_idx(trainDocs,w2v_model.vocab_hash,maxDocLength) trainTitle = get_text_idx(trainTitle,w2v_model.vocab_hash,maxTitleLength) trainTitleDocs = [[]", "= TokenizeSentences(splitData(train_bodies,1)) else: f = open('./fnc_data/train_1.txt','r') data = f.readlines() f.close() trainDocs = TokenizeSentences(data)", "= trainTitle[validIdx] y0Valid = trainRes[validIdx] if typeOfClassify==0: yValid = onehot(y0Valid,4) yTrain = onehot(y0Train,4)", "np.array(splitData(train_bodies,0)).astype('int') train_stances = readRawData('train_stances.csv') trainTitle = TokenizeSentences(splitData(train_stances,0)) trainTitleIdx = np.array(splitData(train_stances,1)).astype('int') trainRes = np.array(splitData(train_stances,2))", "x2Valid = trainTitle[validIdx] y0Valid = trainRes[validIdx] if typeOfClassify==0: yValid = onehot(y0Valid,4) yTrain =", "= trainRes[validIdx] if typeOfClassify==0: yValid = onehot(y0Valid,4) yTrain = onehot(y0Train,4) elif typeOfClassify==1: y0Train[y0Train>0]=1", "for i,x in enumerate(text): words = x for j, w in enumerate(words): if", "i,x in enumerate(text): words = x for j, w in enumerate(words): if w", "trainDocsIdx = np.array(splitData(train_bodies,0)).astype('int') train_stances = readRawData('train_stances.csv') trainTitle = TokenizeSentences(splitData(train_stances,0)) trainTitleIdx = np.array(splitData(train_stances,1)).astype('int') trainRes", "onehot(data,nClass): data2 = np.zeros([len(data),nClass]) for i in range(nClass): data2[np.where(data==i),i]= 1 return data2 def", "useTextsum == 0: trainDocs = TokenizeSentences(splitData(train_bodies,1)) else: f = open('./fnc_data/train_1.txt','r') data = f.readlines()", "= np.array(splitData(train_bodies,0)).astype('int') train_stances = readRawData('train_stances.csv') trainTitle = TokenizeSentences(splitData(train_stances,0)) trainTitleIdx = np.array(splitData(train_stances,1)).astype('int') trainRes =", "-*- coding: utf-8 -*- \"\"\" Created on Sun Jan 21 15:05:24 2018 @author:", "Sun Jan 21 15:05:24 2018 @author: Hendry \"\"\" from read_data import * from", "open('./fnc_data/train_1.txt','r') data = f.readlines() f.close() trainDocs = TokenizeSentences(data) trainDocsIdx = np.array(splitData(train_bodies,0)).astype('int') train_stances =", "y0Train = trainRes[trainIdx] x1Valid = trainTitleDocs[validIdx] x2Valid = trainTitle[validIdx] y0Valid = trainRes[validIdx] if", "for i in range(len(trainDocs)): maxDocLength = max(maxDocLength,len(trainDocs[i])) maxTitleLength = 0 for i in", "return data2 def get_text_idx(text,vocab,max_document_length): text_array = np.zeros([len(text), max_document_length],dtype=np.int32) for i,x in enumerate(text): words", "return text_array def loaddata(w2v_model,typeOfClassify = 0,useTextsum= 1): train_bodies = readRawData('train_bodies.csv') if useTextsum ==", "else: f = open('./fnc_data/train_1.txt','r') data = f.readlines() f.close() trainDocs = TokenizeSentences(data) trainDocsIdx =", "TokenizeSentences import * import numpy as np def onehot(data,nClass): data2 = np.zeros([len(data),nClass]) for", "loaddata(w2v_model,typeOfClassify = 0,useTextsum= 1): train_bodies = 
readRawData('train_bodies.csv') if useTextsum == 0: trainDocs =", "np.argwhere(trainTitleIdx == uniIdxTest[0]) for i in range(len(uniIdxTest)-1): validIdx = np.append(validIdx,np.argwhere(trainTitleIdx == uniIdxTest[i+1])) validIdx", "= x2Train[y0Train>0] y0Train = y0Train[y0Train>0]-1 x1Valid = x1Valid[y0Valid>0] x2Valid = x2Valid[y0Valid>0] y0Valid =", "np def onehot(data,nClass): data2 = np.zeros([len(data),nClass]) for i in range(nClass): data2[np.where(data==i),i]= 1 return", "typeOfClassify==0: yValid = onehot(y0Valid,4) yTrain = onehot(y0Train,4) elif typeOfClassify==1: y0Train[y0Train>0]=1 y0Valid[y0Valid>0]=1 yValid =", "trainTitle = TokenizeSentences(splitData(train_stances,0)) trainTitleIdx = np.array(splitData(train_stances,1)).astype('int') trainRes = np.array(splitData(train_stances,2)) trainRes[np.where(trainRes=='unrelated')]='0' trainRes[np.where(trainRes=='agree')]='1' trainRes[np.where(trainRes=='disagree')]='2' trainRes[np.where(trainRes=='discuss')]='3'", "import * import numpy as np def onehot(data,nClass): data2 = np.zeros([len(data),nClass]) for i", "np.array(splitData(train_stances,2)) trainRes[np.where(trainRes=='unrelated')]='0' trainRes[np.where(trainRes=='agree')]='1' trainRes[np.where(trainRes=='disagree')]='2' trainRes[np.where(trainRes=='discuss')]='3' trainRes =trainRes.astype('int') maxDocLength = 0 for i in", "train_stances = readRawData('train_stances.csv') trainTitle = TokenizeSentences(splitData(train_stances,0)) trainTitleIdx = np.array(splitData(train_stances,1)).astype('int') trainRes = np.array(splitData(train_stances,2)) trainRes[np.where(trainRes=='unrelated')]='0'", "for i in range(len(trainTitle))] for i in range(len(trainTitle)): idx = np.where(trainDocsIdx==trainTitleIdx[i]) trainTitleDocs[i]=trainDocs[int(idx[0])] trainTitleDocs", "utf-8 -*- \"\"\" Created on Sun Jan 21 15:05:24 2018 @author: Hendry \"\"\"", "* import numpy as np def onehot(data,nClass): data2 = np.zeros([len(data),nClass]) for i in" ]
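For reference, a minimal driver sketch follows. It assumes the loader above runs in the same session, that the FNC-1 files train_bodies.csv and train_stances.csv are in the working directory, and that the word2vec model can be any object exposing a vocab_hash dict containing 'the' (the out-of-vocabulary fallback used by get_text_idx); the FakeW2V class and its word list are purely illustrative.

class FakeW2V:
    # Illustrative stand-in for a trained word2vec model: loaddata() only
    # touches .vocab_hash (word -> integer index).
    def __init__(self, words):
        self.vocab_hash = {w: i for i, w in enumerate(words)}

w2v_model = FakeW2V(["the", "a", "news", "headline", "story"])

# typeOfClassify: 0 = 4-way stance, 1 = related vs. unrelated, 2 = 3-way over related pairs only
x1Train, x1Valid, x2Train, x2Valid, yTrain, yValid, vocab_size = loaddata(
    w2v_model, typeOfClassify=0, useTextsum=0)

print(x1Train.shape, x2Train.shape, yTrain.shape, vocab_size)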
[ "sys from pathlib import Path sys.path.insert(0, os.path.abspath('..')) import googlemaps_helpers ROOT = Path('.') DATA_DIR", "os import sys from pathlib import Path sys.path.insert(0, os.path.abspath('..')) import googlemaps_helpers ROOT =", "pathlib import Path sys.path.insert(0, os.path.abspath('..')) import googlemaps_helpers ROOT = Path('.') DATA_DIR = Path('tests/data')", "import sys from pathlib import Path sys.path.insert(0, os.path.abspath('..')) import googlemaps_helpers ROOT = Path('.')", "from pathlib import Path sys.path.insert(0, os.path.abspath('..')) import googlemaps_helpers ROOT = Path('.') DATA_DIR =", "import os import sys from pathlib import Path sys.path.insert(0, os.path.abspath('..')) import googlemaps_helpers ROOT" ]
[ "(self.test_data - self.train_mean) / self.train_std return train_data, test_data def denormalize_samples(self, samples): denormalized =", "import shutil import wget import numpy as np DATA_FILE_NAMES = [\"train1\", \"test1\", \"train2\",", "in samples] return np.array(sample_data) def normalize_data(self): train_data = (self.train_data - self.train_mean) / self.train_std", "train_data = (self.train_data - self.train_mean) / self.train_std test_data = (self.test_data - self.train_mean) /", "def denormalize_samples(self, samples): denormalized = samples * self.train_std + self.train_mean return denormalized def", "file_paths.append(file_path) wget.download(file_urls[file_idx], file_path) print(\"\\nDOWNLOAD COMPLETED\") return file_paths class Dataset: def __init__(self, train_data_path, test_data_path):", "print(\"\\nDOWNLOAD COMPLETED\") return file_paths class Dataset: def __init__(self, train_data_path, test_data_path): self.train_data = self.read_data(train_data_path)", "np.std(self.train_data, axis=0) @staticmethod def read_data(data_path): train_file = open(data_path, \"r\") samples = train_file.readlines() sample_data", "normalize_data(self): train_data = (self.train_data - self.train_mean) / self.train_std test_data = (self.test_data - self.train_mean)", "self.train_std + self.train_mean return denormalized def denormalize_labels(self, labels): denormalized = labels * self.train_std[1]", "os.path.isdir(os.path.join(cwd, \"data\")): shutil.rmtree(os.path.join(cwd, \"data\")) folder_path = os.path.join(cwd, \"data\") os.mkdir(folder_path) file_paths = [] for", "os.path.join(cwd, \"data\") os.mkdir(folder_path) file_paths = [] for file_idx in range(len(file_names)): file_path = os.path.join(folder_path,", "train_data_path, test_data_path): self.train_data = self.read_data(train_data_path) self.test_data = self.read_data(test_data_path) self.train_mean = np.mean(self.train_data, axis=0) self.train_std", "\"data\")) folder_path = os.path.join(cwd, \"data\") os.mkdir(folder_path) file_paths = [] for file_idx in range(len(file_names)):", "def download_data(file_names, file_urls): cwd = os.getcwd() if os.path.isdir(os.path.join(cwd, \"data\")): shutil.rmtree(os.path.join(cwd, \"data\")) folder_path =", "\"test1\", \"train2\", \"test2\"] BASE_URL = \"http://www.cs.bilkent.edu.tr/~gunduz/teaching/cs550/documents\" FILE_URLS = [os.path.join(BASE_URL, file_name) for file_name in", "\"train2\", \"test2\"] BASE_URL = \"http://www.cs.bilkent.edu.tr/~gunduz/teaching/cs550/documents\" FILE_URLS = [os.path.join(BASE_URL, file_name) for file_name in DATA_FILE_NAMES]", "self.read_data(train_data_path) self.test_data = self.read_data(test_data_path) self.train_mean = np.mean(self.train_data, axis=0) self.train_std = np.std(self.train_data, axis=0) @staticmethod", "FILE_URLS = [os.path.join(BASE_URL, file_name) for file_name in DATA_FILE_NAMES] def download_data(file_names, file_urls): cwd =", "sample_data = [list(map(float, sample.split())) for sample in samples] return np.array(sample_data) def normalize_data(self): train_data", "<filename>HW2/dataset_setup.py import os import shutil import wget import numpy as np DATA_FILE_NAMES =", "train_data, test_data def denormalize_samples(self, samples): denormalized = samples * self.train_std + self.train_mean return", "np DATA_FILE_NAMES = [\"train1\", \"test1\", \"train2\", \"test2\"] BASE_URL = \"http://www.cs.bilkent.edu.tr/~gunduz/teaching/cs550/documents\" FILE_URLS = [os.path.join(BASE_URL,", "for file_name in DATA_FILE_NAMES] def 
download_data(file_names, file_urls): cwd = os.getcwd() if os.path.isdir(os.path.join(cwd, \"data\")):", "= open(data_path, \"r\") samples = train_file.readlines() sample_data = [list(map(float, sample.split())) for sample in", "os import shutil import wget import numpy as np DATA_FILE_NAMES = [\"train1\", \"test1\",", "= os.getcwd() if os.path.isdir(os.path.join(cwd, \"data\")): shutil.rmtree(os.path.join(cwd, \"data\")) folder_path = os.path.join(cwd, \"data\") os.mkdir(folder_path) file_paths", "def read_data(data_path): train_file = open(data_path, \"r\") samples = train_file.readlines() sample_data = [list(map(float, sample.split()))", "self.train_mean) / self.train_std test_data = (self.test_data - self.train_mean) / self.train_std return train_data, test_data", "wget import numpy as np DATA_FILE_NAMES = [\"train1\", \"test1\", \"train2\", \"test2\"] BASE_URL =", "read_data(data_path): train_file = open(data_path, \"r\") samples = train_file.readlines() sample_data = [list(map(float, sample.split())) for", "file_paths class Dataset: def __init__(self, train_data_path, test_data_path): self.train_data = self.read_data(train_data_path) self.test_data = self.read_data(test_data_path)", "self.train_mean = np.mean(self.train_data, axis=0) self.train_std = np.std(self.train_data, axis=0) @staticmethod def read_data(data_path): train_file =", "wget.download(file_urls[file_idx], file_path) print(\"\\nDOWNLOAD COMPLETED\") return file_paths class Dataset: def __init__(self, train_data_path, test_data_path): self.train_data", "np.mean(self.train_data, axis=0) self.train_std = np.std(self.train_data, axis=0) @staticmethod def read_data(data_path): train_file = open(data_path, \"r\")", "[list(map(float, sample.split())) for sample in samples] return np.array(sample_data) def normalize_data(self): train_data = (self.train_data", "shutil import wget import numpy as np DATA_FILE_NAMES = [\"train1\", \"test1\", \"train2\", \"test2\"]", "+ \".txt\") file_paths.append(file_path) wget.download(file_urls[file_idx], file_path) print(\"\\nDOWNLOAD COMPLETED\") return file_paths class Dataset: def __init__(self,", "= [\"train1\", \"test1\", \"train2\", \"test2\"] BASE_URL = \"http://www.cs.bilkent.edu.tr/~gunduz/teaching/cs550/documents\" FILE_URLS = [os.path.join(BASE_URL, file_name) for", "file_names[file_idx] + \".txt\") file_paths.append(file_path) wget.download(file_urls[file_idx], file_path) print(\"\\nDOWNLOAD COMPLETED\") return file_paths class Dataset: def", "= os.path.join(cwd, \"data\") os.mkdir(folder_path) file_paths = [] for file_idx in range(len(file_names)): file_path =", "samples = train_file.readlines() sample_data = [list(map(float, sample.split())) for sample in samples] return np.array(sample_data)", "/ self.train_std return train_data, test_data def denormalize_samples(self, samples): denormalized = samples * self.train_std", "file_name) for file_name in DATA_FILE_NAMES] def download_data(file_names, file_urls): cwd = os.getcwd() if os.path.isdir(os.path.join(cwd,", "in DATA_FILE_NAMES] def download_data(file_names, file_urls): cwd = os.getcwd() if os.path.isdir(os.path.join(cwd, \"data\")): shutil.rmtree(os.path.join(cwd, \"data\"))", "folder_path = os.path.join(cwd, \"data\") os.mkdir(folder_path) file_paths = [] for file_idx in range(len(file_names)): file_path", "file_paths = [] for file_idx in range(len(file_names)): file_path = os.path.join(folder_path, file_names[file_idx] + \".txt\")", "denormalized def denormalize_labels(self, labels): denormalized = labels * 
self.train_std[1] + self.train_mean[1] return denormalized", "class Dataset: def __init__(self, train_data_path, test_data_path): self.train_data = self.read_data(train_data_path) self.test_data = self.read_data(test_data_path) self.train_mean", "os.mkdir(folder_path) file_paths = [] for file_idx in range(len(file_names)): file_path = os.path.join(folder_path, file_names[file_idx] +", "train_file = open(data_path, \"r\") samples = train_file.readlines() sample_data = [list(map(float, sample.split())) for sample", "for file_idx in range(len(file_names)): file_path = os.path.join(folder_path, file_names[file_idx] + \".txt\") file_paths.append(file_path) wget.download(file_urls[file_idx], file_path)", "__init__(self, train_data_path, test_data_path): self.train_data = self.read_data(train_data_path) self.test_data = self.read_data(test_data_path) self.train_mean = np.mean(self.train_data, axis=0)", "\".txt\") file_paths.append(file_path) wget.download(file_urls[file_idx], file_path) print(\"\\nDOWNLOAD COMPLETED\") return file_paths class Dataset: def __init__(self, train_data_path,", "samples] return np.array(sample_data) def normalize_data(self): train_data = (self.train_data - self.train_mean) / self.train_std test_data", "self.test_data = self.read_data(test_data_path) self.train_mean = np.mean(self.train_data, axis=0) self.train_std = np.std(self.train_data, axis=0) @staticmethod def", "for sample in samples] return np.array(sample_data) def normalize_data(self): train_data = (self.train_data - self.train_mean)", "np.array(sample_data) def normalize_data(self): train_data = (self.train_data - self.train_mean) / self.train_std test_data = (self.test_data", "import os import shutil import wget import numpy as np DATA_FILE_NAMES = [\"train1\",", "samples): denormalized = samples * self.train_std + self.train_mean return denormalized def denormalize_labels(self, labels):", "axis=0) self.train_std = np.std(self.train_data, axis=0) @staticmethod def read_data(data_path): train_file = open(data_path, \"r\") samples", "def __init__(self, train_data_path, test_data_path): self.train_data = self.read_data(train_data_path) self.test_data = self.read_data(test_data_path) self.train_mean = np.mean(self.train_data,", "self.read_data(test_data_path) self.train_mean = np.mean(self.train_data, axis=0) self.train_std = np.std(self.train_data, axis=0) @staticmethod def read_data(data_path): train_file", "[] for file_idx in range(len(file_names)): file_path = os.path.join(folder_path, file_names[file_idx] + \".txt\") file_paths.append(file_path) wget.download(file_urls[file_idx],", "return np.array(sample_data) def normalize_data(self): train_data = (self.train_data - self.train_mean) / self.train_std test_data =", "DATA_FILE_NAMES = [\"train1\", \"test1\", \"train2\", \"test2\"] BASE_URL = \"http://www.cs.bilkent.edu.tr/~gunduz/teaching/cs550/documents\" FILE_URLS = [os.path.join(BASE_URL, file_name)", "* self.train_std + self.train_mean return denormalized def denormalize_labels(self, labels): denormalized = labels *", "file_idx in range(len(file_names)): file_path = os.path.join(folder_path, file_names[file_idx] + \".txt\") file_paths.append(file_path) wget.download(file_urls[file_idx], file_path) print(\"\\nDOWNLOAD", "BASE_URL = \"http://www.cs.bilkent.edu.tr/~gunduz/teaching/cs550/documents\" FILE_URLS = [os.path.join(BASE_URL, file_name) for file_name in DATA_FILE_NAMES] def download_data(file_names,", "[\"train1\", \"test1\", \"train2\", \"test2\"] BASE_URL = 
\"http://www.cs.bilkent.edu.tr/~gunduz/teaching/cs550/documents\" FILE_URLS = [os.path.join(BASE_URL, file_name) for file_name", "\"test2\"] BASE_URL = \"http://www.cs.bilkent.edu.tr/~gunduz/teaching/cs550/documents\" FILE_URLS = [os.path.join(BASE_URL, file_name) for file_name in DATA_FILE_NAMES] def", "\"r\") samples = train_file.readlines() sample_data = [list(map(float, sample.split())) for sample in samples] return", "= (self.train_data - self.train_mean) / self.train_std test_data = (self.test_data - self.train_mean) / self.train_std", "file_path) print(\"\\nDOWNLOAD COMPLETED\") return file_paths class Dataset: def __init__(self, train_data_path, test_data_path): self.train_data =", "import numpy as np DATA_FILE_NAMES = [\"train1\", \"test1\", \"train2\", \"test2\"] BASE_URL = \"http://www.cs.bilkent.edu.tr/~gunduz/teaching/cs550/documents\"", "/ self.train_std test_data = (self.test_data - self.train_mean) / self.train_std return train_data, test_data def", "= samples * self.train_std + self.train_mean return denormalized def denormalize_labels(self, labels): denormalized =", "= np.mean(self.train_data, axis=0) self.train_std = np.std(self.train_data, axis=0) @staticmethod def read_data(data_path): train_file = open(data_path,", "self.train_std return train_data, test_data def denormalize_samples(self, samples): denormalized = samples * self.train_std +", "return train_data, test_data def denormalize_samples(self, samples): denormalized = samples * self.train_std + self.train_mean", "= (self.test_data - self.train_mean) / self.train_std return train_data, test_data def denormalize_samples(self, samples): denormalized", "download_data(file_names, file_urls): cwd = os.getcwd() if os.path.isdir(os.path.join(cwd, \"data\")): shutil.rmtree(os.path.join(cwd, \"data\")) folder_path = os.path.join(cwd,", "file_path = os.path.join(folder_path, file_names[file_idx] + \".txt\") file_paths.append(file_path) wget.download(file_urls[file_idx], file_path) print(\"\\nDOWNLOAD COMPLETED\") return file_paths", "cwd = os.getcwd() if os.path.isdir(os.path.join(cwd, \"data\")): shutil.rmtree(os.path.join(cwd, \"data\")) folder_path = os.path.join(cwd, \"data\") os.mkdir(folder_path)", "Dataset: def __init__(self, train_data_path, test_data_path): self.train_data = self.read_data(train_data_path) self.test_data = self.read_data(test_data_path) self.train_mean =", "[os.path.join(BASE_URL, file_name) for file_name in DATA_FILE_NAMES] def download_data(file_names, file_urls): cwd = os.getcwd() if", "= self.read_data(test_data_path) self.train_mean = np.mean(self.train_data, axis=0) self.train_std = np.std(self.train_data, axis=0) @staticmethod def read_data(data_path):", "= np.std(self.train_data, axis=0) @staticmethod def read_data(data_path): train_file = open(data_path, \"r\") samples = train_file.readlines()", "denormalize_samples(self, samples): denormalized = samples * self.train_std + self.train_mean return denormalized def denormalize_labels(self,", "DATA_FILE_NAMES] def download_data(file_names, file_urls): cwd = os.getcwd() if os.path.isdir(os.path.join(cwd, \"data\")): shutil.rmtree(os.path.join(cwd, \"data\")) folder_path", "shutil.rmtree(os.path.join(cwd, \"data\")) folder_path = os.path.join(cwd, \"data\") os.mkdir(folder_path) file_paths = [] for file_idx in", "- self.train_mean) / self.train_std return train_data, test_data def denormalize_samples(self, samples): denormalized = samples", "samples * self.train_std + self.train_mean return denormalized def denormalize_labels(self, labels): 
denormalized = labels", "self.train_mean) / self.train_std return train_data, test_data def denormalize_samples(self, samples): denormalized = samples *", "if os.path.isdir(os.path.join(cwd, \"data\")): shutil.rmtree(os.path.join(cwd, \"data\")) folder_path = os.path.join(cwd, \"data\") os.mkdir(folder_path) file_paths = []", "def normalize_data(self): train_data = (self.train_data - self.train_mean) / self.train_std test_data = (self.test_data -", "in range(len(file_names)): file_path = os.path.join(folder_path, file_names[file_idx] + \".txt\") file_paths.append(file_path) wget.download(file_urls[file_idx], file_path) print(\"\\nDOWNLOAD COMPLETED\")", "os.path.join(folder_path, file_names[file_idx] + \".txt\") file_paths.append(file_path) wget.download(file_urls[file_idx], file_path) print(\"\\nDOWNLOAD COMPLETED\") return file_paths class Dataset:", "numpy as np DATA_FILE_NAMES = [\"train1\", \"test1\", \"train2\", \"test2\"] BASE_URL = \"http://www.cs.bilkent.edu.tr/~gunduz/teaching/cs550/documents\" FILE_URLS", "denormalized = samples * self.train_std + self.train_mean return denormalized def denormalize_labels(self, labels): denormalized", "return denormalized def denormalize_labels(self, labels): denormalized = labels * self.train_std[1] + self.train_mean[1] return", "- self.train_mean) / self.train_std test_data = (self.test_data - self.train_mean) / self.train_std return train_data,", "file_urls): cwd = os.getcwd() if os.path.isdir(os.path.join(cwd, \"data\")): shutil.rmtree(os.path.join(cwd, \"data\")) folder_path = os.path.join(cwd, \"data\")", "self.train_std = np.std(self.train_data, axis=0) @staticmethod def read_data(data_path): train_file = open(data_path, \"r\") samples =", "open(data_path, \"r\") samples = train_file.readlines() sample_data = [list(map(float, sample.split())) for sample in samples]", "self.train_std test_data = (self.test_data - self.train_mean) / self.train_std return train_data, test_data def denormalize_samples(self,", "COMPLETED\") return file_paths class Dataset: def __init__(self, train_data_path, test_data_path): self.train_data = self.read_data(train_data_path) self.test_data", "range(len(file_names)): file_path = os.path.join(folder_path, file_names[file_idx] + \".txt\") file_paths.append(file_path) wget.download(file_urls[file_idx], file_path) print(\"\\nDOWNLOAD COMPLETED\") return", "sample.split())) for sample in samples] return np.array(sample_data) def normalize_data(self): train_data = (self.train_data -", "\"data\")): shutil.rmtree(os.path.join(cwd, \"data\")) folder_path = os.path.join(cwd, \"data\") os.mkdir(folder_path) file_paths = [] for file_idx", "sample in samples] return np.array(sample_data) def normalize_data(self): train_data = (self.train_data - self.train_mean) /", "test_data_path): self.train_data = self.read_data(train_data_path) self.test_data = self.read_data(test_data_path) self.train_mean = np.mean(self.train_data, axis=0) self.train_std =", "= self.read_data(train_data_path) self.test_data = self.read_data(test_data_path) self.train_mean = np.mean(self.train_data, axis=0) self.train_std = np.std(self.train_data, axis=0)", "train_file.readlines() sample_data = [list(map(float, sample.split())) for sample in samples] return np.array(sample_data) def normalize_data(self):", "= [list(map(float, sample.split())) for sample in samples] return np.array(sample_data) def normalize_data(self): train_data =", "import wget import numpy as np DATA_FILE_NAMES = [\"train1\", \"test1\", \"train2\", \"test2\"] BASE_URL", "as np 
DATA_FILE_NAMES = [\"train1\", \"test1\", \"train2\", \"test2\"] BASE_URL = \"http://www.cs.bilkent.edu.tr/~gunduz/teaching/cs550/documents\" FILE_URLS =", "file_name in DATA_FILE_NAMES] def download_data(file_names, file_urls): cwd = os.getcwd() if os.path.isdir(os.path.join(cwd, \"data\")): shutil.rmtree(os.path.join(cwd,", "test_data = (self.test_data - self.train_mean) / self.train_std return train_data, test_data def denormalize_samples(self, samples):", "return file_paths class Dataset: def __init__(self, train_data_path, test_data_path): self.train_data = self.read_data(train_data_path) self.test_data =", "= \"http://www.cs.bilkent.edu.tr/~gunduz/teaching/cs550/documents\" FILE_URLS = [os.path.join(BASE_URL, file_name) for file_name in DATA_FILE_NAMES] def download_data(file_names, file_urls):", "@staticmethod def read_data(data_path): train_file = open(data_path, \"r\") samples = train_file.readlines() sample_data = [list(map(float,", "= os.path.join(folder_path, file_names[file_idx] + \".txt\") file_paths.append(file_path) wget.download(file_urls[file_idx], file_path) print(\"\\nDOWNLOAD COMPLETED\") return file_paths class", "= train_file.readlines() sample_data = [list(map(float, sample.split())) for sample in samples] return np.array(sample_data) def", "self.train_mean return denormalized def denormalize_labels(self, labels): denormalized = labels * self.train_std[1] + self.train_mean[1]", "= [os.path.join(BASE_URL, file_name) for file_name in DATA_FILE_NAMES] def download_data(file_names, file_urls): cwd = os.getcwd()", "(self.train_data - self.train_mean) / self.train_std test_data = (self.test_data - self.train_mean) / self.train_std return", "test_data def denormalize_samples(self, samples): denormalized = samples * self.train_std + self.train_mean return denormalized", "axis=0) @staticmethod def read_data(data_path): train_file = open(data_path, \"r\") samples = train_file.readlines() sample_data =", "self.train_data = self.read_data(train_data_path) self.test_data = self.read_data(test_data_path) self.train_mean = np.mean(self.train_data, axis=0) self.train_std = np.std(self.train_data,", "\"http://www.cs.bilkent.edu.tr/~gunduz/teaching/cs550/documents\" FILE_URLS = [os.path.join(BASE_URL, file_name) for file_name in DATA_FILE_NAMES] def download_data(file_names, file_urls): cwd", "= [] for file_idx in range(len(file_names)): file_path = os.path.join(folder_path, file_names[file_idx] + \".txt\") file_paths.append(file_path)", "os.getcwd() if os.path.isdir(os.path.join(cwd, \"data\")): shutil.rmtree(os.path.join(cwd, \"data\")) folder_path = os.path.join(cwd, \"data\") os.mkdir(folder_path) file_paths =", "\"data\") os.mkdir(folder_path) file_paths = [] for file_idx in range(len(file_names)): file_path = os.path.join(folder_path, file_names[file_idx]", "+ self.train_mean return denormalized def denormalize_labels(self, labels): denormalized = labels * self.train_std[1] +" ]
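A short end-to-end sketch of how these pieces fit together: download the four files, build a Dataset from the first train/test pair, normalize with the training statistics, and round-trip through the denormalizer. The train1/test1 pairing and the import path follow DATA_FILE_NAMES and the filename above, but the driver itself is an assumption rather than part of the module.

import numpy as np
from dataset_setup import DATA_FILE_NAMES, FILE_URLS, Dataset, download_data

# downloads into ./data and returns paths in DATA_FILE_NAMES order:
# data/train1.txt, data/test1.txt, data/train2.txt, data/test2.txt
file_paths = download_data(DATA_FILE_NAMES, FILE_URLS)

dataset = Dataset(train_data_path=file_paths[0], test_data_path=file_paths[1])
train_norm, test_norm = dataset.normalize_data()

# denormalize_samples() inverts the z-score normalization exactly
assert np.allclose(dataset.denormalize_samples(train_norm), dataset.train_data)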
[ "pfp \"\"\" import glob import os import six get_strategy = None StratGroup =", "def mutate(field, strat_name_or_cls, num=100, at_once=1, yield_changed=False): \"\"\"Mutate the provided field (probably a Dom", "subfields without # triggering events field._pfp__snapshot(recurse=True) count = 0 for x in six.moves.range(num):", "modified and is no longer needed by the mutate() function. :param pfp.fields.Field field:", "FieldStrat = pfp.fuzz.strats.FieldStrat # load all of the built-in strategies for strat_file in", "os import six get_strategy = None StratGroup = None FieldStrat = None def", "for to_mutate_field in to_mutate: field_strat = strat.get_field_strat(to_mutate_field) if field_strat is not None: with_strats.append((to_mutate_field,", "field_strat)) # we don't need these ones anymore del to_mutate # save the", "current value of all subfields without # triggering events field._pfp__snapshot(recurse=True) count = 0", "strat.which(field) with_strats = [] for to_mutate_field in to_mutate: field_strat = strat.get_field_strat(to_mutate_field) if field_strat", "the current value of all subfields without # triggering events field._pfp__snapshot(recurse=True) count =", "(not an instance) :param int num: The number of mutations to yield :param", "__import__(\"pfp.fuzz.\" + mod_name) def mutate(field, strat_name_or_cls, num=100, at_once=1, yield_changed=False): \"\"\"Mutate the provided field", "that were mutated in that iteration (if ``yield_changed`` is ``True``). It should also", "= set() idx_pool = set([x for x in six.moves.xrange(len(with_strats))]) # modify `at_once` number", "function will yield back the field after each mutation, optionally also yielding a", "``yield_changed`` is ``True``). It should also be noted that the yielded set of", "each mutation, optionally also yielding a ``set`` of fields that were mutated in", "yield_changed=False): \"\"\"Mutate the provided field (probably a Dom or struct instance) using the", "name of a strategy, or the actual strategy class (not an instance) :param", "need these ones anymore del to_mutate # save the current value of all", "six get_strategy = None StratGroup = None FieldStrat = None def init(): global", "by the mutate() function. :param pfp.fields.Field field: The field to mutate (can be", "more than once # since we're removing the idx after choosing it rand_idx", "get_strategy = None StratGroup = None FieldStrat = None def init(): global get_strategy", "= pfp.fuzz.strats.StratGroup FieldStrat = pfp.fuzz.strats.FieldStrat # load all of the built-in strategies for", "field_strat is not None: with_strats.append((to_mutate_field, field_strat)) # we don't need these ones anymore", "\"\"\" This module contains the base classes used when defining mutation strategies for", "pfp.fuzz.strats.FieldStrat # load all of the built-in strategies for strat_file in glob.glob(os.path.join(os.path.dirname(__file__), \"*.py\")):", "StratGroup = None FieldStrat = None def init(): global get_strategy global StratGroup global", "chosen_fields.add(rand_field) field_strat.mutate(rand_field) if yield_changed: yield field, chosen_fields else: # yield back the original", "idx_pool = set([x for x in six.moves.xrange(len(with_strats))]) # modify `at_once` number of fields", "-*- coding: utf-8 -*- \"\"\" This module contains the base classes used when", "mutated in that iteration (if ``yield_changed`` is ``True``). 
It should also be noted", "yield_changed: yield field, chosen_fields else: # yield back the original field yield field", "= [] for to_mutate_field in to_mutate: field_strat = strat.get_field_strat(to_mutate_field) if field_strat is not", "never pull the same idx from idx_pool more than once # since we're", "once. This function will yield back the field after each mutation, optionally also", "None FieldStrat = None def init(): global get_strategy global StratGroup global FieldStrat import", "fields at once. This function will yield back the field after each mutation,", "if field_strat is not None: with_strats.append((to_mutate_field, field_strat)) # we don't need these ones", "filename.replace(\".py\", \"\").replace(\".pyc\", \"\") __import__(\"pfp.fuzz.\" + mod_name) def mutate(field, strat_name_or_cls, num=100, at_once=1, yield_changed=False): \"\"\"Mutate", "and is no longer needed by the mutate() function. :param pfp.fields.Field field: The", "from idx_pool more than once # since we're removing the idx after choosing", "field (probably a Dom or struct instance) using the strategy specified with ``strat_name_or_class``,", "in six.moves.xrange(len(with_strats))]) # modify `at_once` number of fields OR len(with_strats) number of fields,", "removing the idx after choosing it rand_idx = rand.sample(idx_pool, 1)[0] idx_pool.remove(rand_idx) rand_field,field_strat =", "``set`` of fields that were mutated in that iteration (if ``yield_changed`` is ``True``).", "whichever is lower for at_onces in six.moves.xrange(min(len(with_strats), at_once)): # we'll never pull the", "provided field (probably a Dom or struct instance) using the strategy specified with", "instance) using the strategy specified with ``strat_name_or_class``, yielding ``num`` mutations that affect up", "for x in six.moves.xrange(len(with_strats))]) # modify `at_once` number of fields OR len(with_strats) number", "chosen_fields else: # yield back the original field yield field # restore the", "to mutate at once :param bool yield_changed: Yield a list of fields changed", "+ mod_name) def mutate(field, strat_name_or_cls, num=100, at_once=1, yield_changed=False): \"\"\"Mutate the provided field (probably", "= strat.get_field_strat(to_mutate_field) if field_strat is not None: with_strats.append((to_mutate_field, field_strat)) # we don't need", "than once # since we're removing the idx after choosing it rand_idx =", "mutation strategies for pfp \"\"\" import glob import os import six get_strategy =", "Yield a list of fields changed along with the mutated dom :returns: generator", "strat_file in glob.glob(os.path.join(os.path.dirname(__file__), \"*.py\")): filename = os.path.basename(strat_file) if filename in [\"__init__.py\", \"base.py\"]: continue", "of fields, # whichever is lower for at_onces in six.moves.xrange(min(len(with_strats), at_once)): # we'll", "not just Dom/Structs) :param strat_name_or_class: Can be the name of a strategy, or", "needed by the mutate() function. 
:param pfp.fields.Field field: The field to mutate (can", "= pfp.fuzz.strats.get_strategy StratGroup = pfp.fuzz.strats.StratGroup FieldStrat = pfp.fuzz.strats.FieldStrat # load all of the", "num=100, at_once=1, yield_changed=False): \"\"\"Mutate the provided field (probably a Dom or struct instance)", "yield field, chosen_fields else: # yield back the original field yield field #", "class (not an instance) :param int num: The number of mutations to yield", "strat.get_field_strat(to_mutate_field) if field_strat is not None: with_strats.append((to_mutate_field, field_strat)) # we don't need these", "pfp.fuzz.strats get_strategy = pfp.fuzz.strats.get_strategy StratGroup = pfp.fuzz.strats.StratGroup FieldStrat = pfp.fuzz.strats.FieldStrat # load all", "the original field yield field # restore the saved value of all subfields", "all of the built-in strategies for strat_file in glob.glob(os.path.join(os.path.dirname(__file__), \"*.py\")): filename = os.path.basename(strat_file)", "field, chosen_fields else: # yield back the original field yield field # restore", "a Dom or struct instance) using the strategy specified with ``strat_name_or_class``, yielding ``num``", "if filename in [\"__init__.py\", \"base.py\"]: continue mod_name = filename.replace(\".py\", \"\").replace(\".pyc\", \"\") __import__(\"pfp.fuzz.\" +", "that iteration (if ``yield_changed`` is ``True``). It should also be noted that the", "import os import six get_strategy = None StratGroup = None FieldStrat = None", "the name of a strategy, or the actual strategy class (not an instance)", "mod_name = filename.replace(\".py\", \"\").replace(\".pyc\", \"\") __import__(\"pfp.fuzz.\" + mod_name) def mutate(field, strat_name_or_cls, num=100, at_once=1,", "the actual strategy class (not an instance) :param int num: The number of", "the yielded set of changed fields *can* be modified and is no longer", "FieldStrat = None def init(): global get_strategy global StratGroup global FieldStrat import pfp.fuzz.strats", "yielding ``num`` mutations that affect up to ``at_once`` fields at once. 
This function", "at once :param bool yield_changed: Yield a list of fields changed along with", "at_once)): # we'll never pull the same idx from idx_pool more than once", "dom :returns: generator \"\"\" import pfp.fuzz.rand as rand init() strat = get_strategy(strat_name_or_cls) to_mutate", "strategy, or the actual strategy class (not an instance) :param int num: The", "mutate(field, strat_name_or_cls, num=100, at_once=1, yield_changed=False): \"\"\"Mutate the provided field (probably a Dom or", "# we'll never pull the same idx from idx_pool more than once #", "will yield back the field after each mutation, optionally also yielding a ``set``", "original field yield field # restore the saved value of all subfields without", "also yielding a ``set`` of fields that were mutated in that iteration (if", "value of all subfields without # triggering events field._pfp__snapshot(recurse=True) count = 0 for", "chosen_fields = set() idx_pool = set([x for x in six.moves.xrange(len(with_strats))]) # modify `at_once`", "The number of mutations to yield :param int at_once: The number of fields", "number of fields to mutate at once :param bool yield_changed: Yield a list", "in [\"__init__.py\", \"base.py\"]: continue mod_name = filename.replace(\".py\", \"\").replace(\".pyc\", \"\") __import__(\"pfp.fuzz.\" + mod_name) def", "anymore del to_mutate # save the current value of all subfields without #", "save the current value of all subfields without # triggering events field._pfp__snapshot(recurse=True) count", "set([x for x in six.moves.xrange(len(with_strats))]) # modify `at_once` number of fields OR len(with_strats)", "\"\"\" import pfp.fuzz.rand as rand init() strat = get_strategy(strat_name_or_cls) to_mutate = strat.which(field) with_strats", "\"\") __import__(\"pfp.fuzz.\" + mod_name) def mutate(field, strat_name_or_cls, num=100, at_once=1, yield_changed=False): \"\"\"Mutate the provided", "a ``set`` of fields that were mutated in that iteration (if ``yield_changed`` is", "back the field after each mutation, optionally also yielding a ``set`` of fields", "init() strat = get_strategy(strat_name_or_cls) to_mutate = strat.which(field) with_strats = [] for to_mutate_field in", "for strat_file in glob.glob(os.path.join(os.path.dirname(__file__), \"*.py\")): filename = os.path.basename(strat_file) if filename in [\"__init__.py\", \"base.py\"]:", "along with the mutated dom :returns: generator \"\"\" import pfp.fuzz.rand as rand init()", "idx_pool.remove(rand_idx) rand_field,field_strat = with_strats[rand_idx] chosen_fields.add(rand_field) field_strat.mutate(rand_field) if yield_changed: yield field, chosen_fields else: #", ":param strat_name_or_class: Can be the name of a strategy, or the actual strategy", "is no longer needed by the mutate() function. 
:param pfp.fields.Field field: The field", "set of changed fields *can* be modified and is no longer needed by", "# load all of the built-in strategies for strat_file in glob.glob(os.path.join(os.path.dirname(__file__), \"*.py\")): filename", "Can be the name of a strategy, or the actual strategy class (not", "number of mutations to yield :param int at_once: The number of fields to", "# modify `at_once` number of fields OR len(with_strats) number of fields, # whichever", "generator \"\"\" import pfp.fuzz.rand as rand init() strat = get_strategy(strat_name_or_cls) to_mutate = strat.which(field)", "continue mod_name = filename.replace(\".py\", \"\").replace(\".pyc\", \"\") __import__(\"pfp.fuzz.\" + mod_name) def mutate(field, strat_name_or_cls, num=100,", "1)[0] idx_pool.remove(rand_idx) rand_field,field_strat = with_strats[rand_idx] chosen_fields.add(rand_field) field_strat.mutate(rand_field) if yield_changed: yield field, chosen_fields else:", ":param int num: The number of mutations to yield :param int at_once: The", "iteration (if ``yield_changed`` is ``True``). It should also be noted that the yielded", "# since we're removing the idx after choosing it rand_idx = rand.sample(idx_pool, 1)[0]", "# -*- coding: utf-8 -*- \"\"\" This module contains the base classes used", "at_onces in six.moves.xrange(min(len(with_strats), at_once)): # we'll never pull the same idx from idx_pool", "os.path.basename(strat_file) if filename in [\"__init__.py\", \"base.py\"]: continue mod_name = filename.replace(\".py\", \"\").replace(\".pyc\", \"\") __import__(\"pfp.fuzz.\"", "= filename.replace(\".py\", \"\").replace(\".pyc\", \"\") __import__(\"pfp.fuzz.\" + mod_name) def mutate(field, strat_name_or_cls, num=100, at_once=1, yield_changed=False):", "all subfields without # triggering events field._pfp__snapshot(recurse=True) count = 0 for x in", "the mutate() function. :param pfp.fields.Field field: The field to mutate (can be anything,", "same idx from idx_pool more than once # since we're removing the idx", "be the name of a strategy, or the actual strategy class (not an", "yield_changed: Yield a list of fields changed along with the mutated dom :returns:", "of fields changed along with the mutated dom :returns: generator \"\"\" import pfp.fuzz.rand", "were mutated in that iteration (if ``yield_changed`` is ``True``). It should also be", "to_mutate # save the current value of all subfields without # triggering events", "the field after each mutation, optionally also yielding a ``set`` of fields that", "built-in strategies for strat_file in glob.glob(os.path.join(os.path.dirname(__file__), \"*.py\")): filename = os.path.basename(strat_file) if filename in", "of fields to mutate at once :param bool yield_changed: Yield a list of", "affect up to ``at_once`` fields at once. 
This function will yield back the", "with the mutated dom :returns: generator \"\"\" import pfp.fuzz.rand as rand init() strat", "global get_strategy global StratGroup global FieldStrat import pfp.fuzz.strats get_strategy = pfp.fuzz.strats.get_strategy StratGroup =", ":returns: generator \"\"\" import pfp.fuzz.rand as rand init() strat = get_strategy(strat_name_or_cls) to_mutate =", "= pfp.fuzz.strats.FieldStrat # load all of the built-in strategies for strat_file in glob.glob(os.path.join(os.path.dirname(__file__),", "classes used when defining mutation strategies for pfp \"\"\" import glob import os", "fields, # whichever is lower for at_onces in six.moves.xrange(min(len(with_strats), at_once)): # we'll never", "triggering events field._pfp__snapshot(recurse=True) count = 0 for x in six.moves.range(num): chosen_fields = set()", "mutate() function. :param pfp.fields.Field field: The field to mutate (can be anything, not", "longer needed by the mutate() function. :param pfp.fields.Field field: The field to mutate", "at once. This function will yield back the field after each mutation, optionally", "when defining mutation strategies for pfp \"\"\" import glob import os import six", "for pfp \"\"\" import glob import os import six get_strategy = None StratGroup", "number of fields OR len(with_strats) number of fields, # whichever is lower for", "fields *can* be modified and is no longer needed by the mutate() function.", "field to mutate (can be anything, not just Dom/Structs) :param strat_name_or_class: Can be", "import glob import os import six get_strategy = None StratGroup = None FieldStrat", "Dom/Structs) :param strat_name_or_class: Can be the name of a strategy, or the actual", "pfp.fields.Field field: The field to mutate (can be anything, not just Dom/Structs) :param", "for at_onces in six.moves.xrange(min(len(with_strats), at_once)): # we'll never pull the same idx from", "del to_mutate # save the current value of all subfields without # triggering", "rand init() strat = get_strategy(strat_name_or_cls) to_mutate = strat.which(field) with_strats = [] for to_mutate_field", "[\"__init__.py\", \"base.py\"]: continue mod_name = filename.replace(\".py\", \"\").replace(\".pyc\", \"\") __import__(\"pfp.fuzz.\" + mod_name) def mutate(field,", "\"*.py\")): filename = os.path.basename(strat_file) if filename in [\"__init__.py\", \"base.py\"]: continue mod_name = filename.replace(\".py\",", "(if ``yield_changed`` is ``True``). 
It should also be noted that the yielded set", "count = 0 for x in six.moves.range(num): chosen_fields = set() idx_pool = set([x", "def init(): global get_strategy global StratGroup global FieldStrat import pfp.fuzz.strats get_strategy = pfp.fuzz.strats.get_strategy", "pfp.fuzz.strats.StratGroup FieldStrat = pfp.fuzz.strats.FieldStrat # load all of the built-in strategies for strat_file", "don't need these ones anymore del to_mutate # save the current value of", "yield :param int at_once: The number of fields to mutate at once :param", "field # restore the saved value of all subfields without # triggering events", "anything, not just Dom/Structs) :param strat_name_or_class: Can be the name of a strategy,", "fields changed along with the mutated dom :returns: generator \"\"\" import pfp.fuzz.rand as", "StratGroup = pfp.fuzz.strats.StratGroup FieldStrat = pfp.fuzz.strats.FieldStrat # load all of the built-in strategies", "# restore the saved value of all subfields without # triggering events field._pfp__restore_snapshot(recurse=True)", "bool yield_changed: Yield a list of fields changed along with the mutated dom", "None: with_strats.append((to_mutate_field, field_strat)) # we don't need these ones anymore del to_mutate #", "get_strategy = pfp.fuzz.strats.get_strategy StratGroup = pfp.fuzz.strats.StratGroup FieldStrat = pfp.fuzz.strats.FieldStrat # load all of", "The field to mutate (can be anything, not just Dom/Structs) :param strat_name_or_class: Can", ":param int at_once: The number of fields to mutate at once :param bool", "FieldStrat import pfp.fuzz.strats get_strategy = pfp.fuzz.strats.get_strategy StratGroup = pfp.fuzz.strats.StratGroup FieldStrat = pfp.fuzz.strats.FieldStrat #", "pfp.fuzz.rand as rand init() strat = get_strategy(strat_name_or_cls) to_mutate = strat.which(field) with_strats = []", "``at_once`` fields at once. This function will yield back the field after each", "strat = get_strategy(strat_name_or_cls) to_mutate = strat.which(field) with_strats = [] for to_mutate_field in to_mutate:", "fields that were mutated in that iteration (if ``yield_changed`` is ``True``). It should", "in six.moves.range(num): chosen_fields = set() idx_pool = set([x for x in six.moves.xrange(len(with_strats))]) #", "the idx after choosing it rand_idx = rand.sample(idx_pool, 1)[0] idx_pool.remove(rand_idx) rand_field,field_strat = with_strats[rand_idx]", "import six get_strategy = None StratGroup = None FieldStrat = None def init():", "once :param bool yield_changed: Yield a list of fields changed along with the", "field._pfp__snapshot(recurse=True) count = 0 for x in six.moves.range(num): chosen_fields = set() idx_pool =", "strat_name_or_class: Can be the name of a strategy, or the actual strategy class", "Dom or struct instance) using the strategy specified with ``strat_name_or_class``, yielding ``num`` mutations", "mutate at once :param bool yield_changed: Yield a list of fields changed along", "This module contains the base classes used when defining mutation strategies for pfp", "to_mutate = strat.which(field) with_strats = [] for to_mutate_field in to_mutate: field_strat = strat.get_field_strat(to_mutate_field)", "mutations to yield :param int at_once: The number of fields to mutate at", "changed along with the mutated dom :returns: generator \"\"\" import pfp.fuzz.rand as rand", "``num`` mutations that affect up to ``at_once`` fields at once. 
This function will", "pull the same idx from idx_pool more than once # since we're removing", "# triggering events field._pfp__snapshot(recurse=True) count = 0 for x in six.moves.range(num): chosen_fields =", "filename in [\"__init__.py\", \"base.py\"]: continue mod_name = filename.replace(\".py\", \"\").replace(\".pyc\", \"\") __import__(\"pfp.fuzz.\" + mod_name)", "modify `at_once` number of fields OR len(with_strats) number of fields, # whichever is", "just Dom/Structs) :param strat_name_or_class: Can be the name of a strategy, or the", "back the original field yield field # restore the saved value of all", "`at_once` number of fields OR len(with_strats) number of fields, # whichever is lower", "= set([x for x in six.moves.xrange(len(with_strats))]) # modify `at_once` number of fields OR", "It should also be noted that the yielded set of changed fields *can*", "import pfp.fuzz.strats get_strategy = pfp.fuzz.strats.get_strategy StratGroup = pfp.fuzz.strats.StratGroup FieldStrat = pfp.fuzz.strats.FieldStrat # load", "strat_name_or_cls, num=100, at_once=1, yield_changed=False): \"\"\"Mutate the provided field (probably a Dom or struct", "strategies for strat_file in glob.glob(os.path.join(os.path.dirname(__file__), \"*.py\")): filename = os.path.basename(strat_file) if filename in [\"__init__.py\",", "instance) :param int num: The number of mutations to yield :param int at_once:", "be modified and is no longer needed by the mutate() function. :param pfp.fields.Field", "\"\"\" import glob import os import six get_strategy = None StratGroup = None", "yield field # restore the saved value of all subfields without # triggering", "lower for at_onces in six.moves.xrange(min(len(with_strats), at_once)): # we'll never pull the same idx", "``True``). It should also be noted that the yielded set of changed fields", "\"\").replace(\".pyc\", \"\") __import__(\"pfp.fuzz.\" + mod_name) def mutate(field, strat_name_or_cls, num=100, at_once=1, yield_changed=False): \"\"\"Mutate the", "field_strat.mutate(rand_field) if yield_changed: yield field, chosen_fields else: # yield back the original field", "module contains the base classes used when defining mutation strategies for pfp \"\"\"", "at_once: The number of fields to mutate at once :param bool yield_changed: Yield", "# whichever is lower for at_onces in six.moves.xrange(min(len(with_strats), at_once)): # we'll never pull", "strategy class (not an instance) :param int num: The number of mutations to", "yield back the field after each mutation, optionally also yielding a ``set`` of", ":param pfp.fields.Field field: The field to mutate (can be anything, not just Dom/Structs)", "= with_strats[rand_idx] chosen_fields.add(rand_field) field_strat.mutate(rand_field) if yield_changed: yield field, chosen_fields else: # yield back", "up to ``at_once`` fields at once. This function will yield back the field", "idx from idx_pool more than once # since we're removing the idx after", "also be noted that the yielded set of changed fields *can* be modified", "function. 
:param pfp.fields.Field field: The field to mutate (can be anything, not just", "glob.glob(os.path.join(os.path.dirname(__file__), \"*.py\")): filename = os.path.basename(strat_file) if filename in [\"__init__.py\", \"base.py\"]: continue mod_name =", "This function will yield back the field after each mutation, optionally also yielding", "int num: The number of mutations to yield :param int at_once: The number", "The number of fields to mutate at once :param bool yield_changed: Yield a", "fields OR len(with_strats) number of fields, # whichever is lower for at_onces in", "noted that the yielded set of changed fields *can* be modified and is", "rand_idx = rand.sample(idx_pool, 1)[0] idx_pool.remove(rand_idx) rand_field,field_strat = with_strats[rand_idx] chosen_fields.add(rand_field) field_strat.mutate(rand_field) if yield_changed: yield", "defining mutation strategies for pfp \"\"\" import glob import os import six get_strategy", "pfp.fuzz.strats.get_strategy StratGroup = pfp.fuzz.strats.StratGroup FieldStrat = pfp.fuzz.strats.FieldStrat # load all of the built-in", "of changed fields *can* be modified and is no longer needed by the", "field yield field # restore the saved value of all subfields without #", "with ``strat_name_or_class``, yielding ``num`` mutations that affect up to ``at_once`` fields at once.", "the base classes used when defining mutation strategies for pfp \"\"\" import glob", "get_strategy global StratGroup global FieldStrat import pfp.fuzz.strats get_strategy = pfp.fuzz.strats.get_strategy StratGroup = pfp.fuzz.strats.StratGroup", "to mutate (can be anything, not just Dom/Structs) :param strat_name_or_class: Can be the", "mutations that affect up to ``at_once`` fields at once. This function will yield", "choosing it rand_idx = rand.sample(idx_pool, 1)[0] idx_pool.remove(rand_idx) rand_field,field_strat = with_strats[rand_idx] chosen_fields.add(rand_field) field_strat.mutate(rand_field) if", "# save the current value of all subfields without # triggering events field._pfp__snapshot(recurse=True)", "``strat_name_or_class``, yielding ``num`` mutations that affect up to ``at_once`` fields at once. This", "0 for x in six.moves.range(num): chosen_fields = set() idx_pool = set([x for x", "\"base.py\"]: continue mod_name = filename.replace(\".py\", \"\").replace(\".pyc\", \"\") __import__(\"pfp.fuzz.\" + mod_name) def mutate(field, strat_name_or_cls,", "= None StratGroup = None FieldStrat = None def init(): global get_strategy global", "of all subfields without # triggering events field._pfp__snapshot(recurse=True) count = 0 for x", "coding: utf-8 -*- \"\"\" This module contains the base classes used when defining", "ones anymore del to_mutate # save the current value of all subfields without", "with_strats[rand_idx] chosen_fields.add(rand_field) field_strat.mutate(rand_field) if yield_changed: yield field, chosen_fields else: # yield back the", "no longer needed by the mutate() function. 
:param pfp.fields.Field field: The field to", "after choosing it rand_idx = rand.sample(idx_pool, 1)[0] idx_pool.remove(rand_idx) rand_field,field_strat = with_strats[rand_idx] chosen_fields.add(rand_field) field_strat.mutate(rand_field)", "the provided field (probably a Dom or struct instance) using the strategy specified", "(can be anything, not just Dom/Structs) :param strat_name_or_class: Can be the name of", "import pfp.fuzz.rand as rand init() strat = get_strategy(strat_name_or_cls) to_mutate = strat.which(field) with_strats =", "int at_once: The number of fields to mutate at once :param bool yield_changed:", "= None def init(): global get_strategy global StratGroup global FieldStrat import pfp.fuzz.strats get_strategy", "as rand init() strat = get_strategy(strat_name_or_cls) to_mutate = strat.which(field) with_strats = [] for", "with_strats = [] for to_mutate_field in to_mutate: field_strat = strat.get_field_strat(to_mutate_field) if field_strat is", "be noted that the yielded set of changed fields *can* be modified and", "in six.moves.xrange(min(len(with_strats), at_once)): # we'll never pull the same idx from idx_pool more", "strategies for pfp \"\"\" import glob import os import six get_strategy = None", "x in six.moves.xrange(len(with_strats))]) # modify `at_once` number of fields OR len(with_strats) number of", "optionally also yielding a ``set`` of fields that were mutated in that iteration", "should also be noted that the yielded set of changed fields *can* be", "an instance) :param int num: The number of mutations to yield :param int", "None StratGroup = None FieldStrat = None def init(): global get_strategy global StratGroup", "for x in six.moves.range(num): chosen_fields = set() idx_pool = set([x for x in", "a strategy, or the actual strategy class (not an instance) :param int num:", "glob import os import six get_strategy = None StratGroup = None FieldStrat =", "rand_field,field_strat = with_strats[rand_idx] chosen_fields.add(rand_field) field_strat.mutate(rand_field) if yield_changed: yield field, chosen_fields else: # yield", "of a strategy, or the actual strategy class (not an instance) :param int", "using the strategy specified with ``strat_name_or_class``, yielding ``num`` mutations that affect up to", "changed fields *can* be modified and is no longer needed by the mutate()", "actual strategy class (not an instance) :param int num: The number of mutations", "six.moves.range(num): chosen_fields = set() idx_pool = set([x for x in six.moves.xrange(len(with_strats))]) # modify", "# we don't need these ones anymore del to_mutate # save the current", "mutation, optionally also yielding a ``set`` of fields that were mutated in that", "utf-8 -*- \"\"\" This module contains the base classes used when defining mutation", "at_once=1, yield_changed=False): \"\"\"Mutate the provided field (probably a Dom or struct instance) using", "= strat.which(field) with_strats = [] for to_mutate_field in to_mutate: field_strat = strat.get_field_strat(to_mutate_field) if", "len(with_strats) number of fields, # whichever is lower for at_onces in six.moves.xrange(min(len(with_strats), at_once)):", "mod_name) def mutate(field, strat_name_or_cls, num=100, at_once=1, yield_changed=False): \"\"\"Mutate the provided field (probably a", "to ``at_once`` fields at once. 
This function will yield back the field after", "else: # yield back the original field yield field # restore the saved", "None def init(): global get_strategy global StratGroup global FieldStrat import pfp.fuzz.strats get_strategy =", "OR len(with_strats) number of fields, # whichever is lower for at_onces in six.moves.xrange(min(len(with_strats),", "mutate (can be anything, not just Dom/Structs) :param strat_name_or_class: Can be the name", "mutated dom :returns: generator \"\"\" import pfp.fuzz.rand as rand init() strat = get_strategy(strat_name_or_cls)", "of mutations to yield :param int at_once: The number of fields to mutate", "x in six.moves.range(num): chosen_fields = set() idx_pool = set([x for x in six.moves.xrange(len(with_strats))])", "global StratGroup global FieldStrat import pfp.fuzz.strats get_strategy = pfp.fuzz.strats.get_strategy StratGroup = pfp.fuzz.strats.StratGroup FieldStrat", "of the built-in strategies for strat_file in glob.glob(os.path.join(os.path.dirname(__file__), \"*.py\")): filename = os.path.basename(strat_file) if", "= get_strategy(strat_name_or_cls) to_mutate = strat.which(field) with_strats = [] for to_mutate_field in to_mutate: field_strat", "python # -*- coding: utf-8 -*- \"\"\" This module contains the base classes", "since we're removing the idx after choosing it rand_idx = rand.sample(idx_pool, 1)[0] idx_pool.remove(rand_idx)", "global FieldStrat import pfp.fuzz.strats get_strategy = pfp.fuzz.strats.get_strategy StratGroup = pfp.fuzz.strats.StratGroup FieldStrat = pfp.fuzz.strats.FieldStrat", "is lower for at_onces in six.moves.xrange(min(len(with_strats), at_once)): # we'll never pull the same", "with_strats.append((to_mutate_field, field_strat)) # we don't need these ones anymore del to_mutate # save", "the same idx from idx_pool more than once # since we're removing the", "#!/usr/bin/env python # -*- coding: utf-8 -*- \"\"\" This module contains the base", "field_strat = strat.get_field_strat(to_mutate_field) if field_strat is not None: with_strats.append((to_mutate_field, field_strat)) # we don't", "idx_pool more than once # since we're removing the idx after choosing it", "struct instance) using the strategy specified with ``strat_name_or_class``, yielding ``num`` mutations that affect", "without # triggering events field._pfp__snapshot(recurse=True) count = 0 for x in six.moves.range(num): chosen_fields", "base classes used when defining mutation strategies for pfp \"\"\" import glob import", "yield back the original field yield field # restore the saved value of", "that the yielded set of changed fields *can* be modified and is no", "to yield :param int at_once: The number of fields to mutate at once", "once # since we're removing the idx after choosing it rand_idx = rand.sample(idx_pool,", "of fields OR len(with_strats) number of fields, # whichever is lower for at_onces", "= 0 for x in six.moves.range(num): chosen_fields = set() idx_pool = set([x for", "specified with ``strat_name_or_class``, yielding ``num`` mutations that affect up to ``at_once`` fields at", "six.moves.xrange(len(with_strats))]) # modify `at_once` number of fields OR len(with_strats) number of fields, #", "= None FieldStrat = None def init(): global get_strategy global StratGroup global FieldStrat", "idx after choosing it rand_idx = rand.sample(idx_pool, 1)[0] idx_pool.remove(rand_idx) rand_field,field_strat = with_strats[rand_idx] chosen_fields.add(rand_field)", "yielding a ``set`` of fields that were mutated in that iteration (if ``yield_changed``", "in to_mutate: 
field_strat = strat.get_field_strat(to_mutate_field) if field_strat is not None: with_strats.append((to_mutate_field, field_strat)) #", "load all of the built-in strategies for strat_file in glob.glob(os.path.join(os.path.dirname(__file__), \"*.py\")): filename =", "filename = os.path.basename(strat_file) if filename in [\"__init__.py\", \"base.py\"]: continue mod_name = filename.replace(\".py\", \"\").replace(\".pyc\",", "these ones anymore del to_mutate # save the current value of all subfields", "init(): global get_strategy global StratGroup global FieldStrat import pfp.fuzz.strats get_strategy = pfp.fuzz.strats.get_strategy StratGroup", "events field._pfp__snapshot(recurse=True) count = 0 for x in six.moves.range(num): chosen_fields = set() idx_pool", "fields to mutate at once :param bool yield_changed: Yield a list of fields", "a list of fields changed along with the mutated dom :returns: generator \"\"\"", "in glob.glob(os.path.join(os.path.dirname(__file__), \"*.py\")): filename = os.path.basename(strat_file) if filename in [\"__init__.py\", \"base.py\"]: continue mod_name", "we're removing the idx after choosing it rand_idx = rand.sample(idx_pool, 1)[0] idx_pool.remove(rand_idx) rand_field,field_strat", "of fields that were mutated in that iteration (if ``yield_changed`` is ``True``). It", "field: The field to mutate (can be anything, not just Dom/Structs) :param strat_name_or_class:", "field after each mutation, optionally also yielding a ``set`` of fields that were", "we don't need these ones anymore del to_mutate # save the current value", "used when defining mutation strategies for pfp \"\"\" import glob import os import", "contains the base classes used when defining mutation strategies for pfp \"\"\" import", "number of fields, # whichever is lower for at_onces in six.moves.xrange(min(len(with_strats), at_once)): #", "if yield_changed: yield field, chosen_fields else: # yield back the original field yield", "the strategy specified with ``strat_name_or_class``, yielding ``num`` mutations that affect up to ``at_once``", "to_mutate_field in to_mutate: field_strat = strat.get_field_strat(to_mutate_field) if field_strat is not None: with_strats.append((to_mutate_field, field_strat))", "StratGroup global FieldStrat import pfp.fuzz.strats get_strategy = pfp.fuzz.strats.get_strategy StratGroup = pfp.fuzz.strats.StratGroup FieldStrat =", "-*- \"\"\" This module contains the base classes used when defining mutation strategies", "is ``True``). It should also be noted that the yielded set of changed", "get_strategy(strat_name_or_cls) to_mutate = strat.which(field) with_strats = [] for to_mutate_field in to_mutate: field_strat =", "# yield back the original field yield field # restore the saved value", "*can* be modified and is no longer needed by the mutate() function. 
:param", "or struct instance) using the strategy specified with ``strat_name_or_class``, yielding ``num`` mutations that", "after each mutation, optionally also yielding a ``set`` of fields that were mutated", "yielded set of changed fields *can* be modified and is no longer needed", "the built-in strategies for strat_file in glob.glob(os.path.join(os.path.dirname(__file__), \"*.py\")): filename = os.path.basename(strat_file) if filename", "is not None: with_strats.append((to_mutate_field, field_strat)) # we don't need these ones anymore del", "to_mutate: field_strat = strat.get_field_strat(to_mutate_field) if field_strat is not None: with_strats.append((to_mutate_field, field_strat)) # we", "not None: with_strats.append((to_mutate_field, field_strat)) # we don't need these ones anymore del to_mutate", "set() idx_pool = set([x for x in six.moves.xrange(len(with_strats))]) # modify `at_once` number of", "(probably a Dom or struct instance) using the strategy specified with ``strat_name_or_class``, yielding", "the mutated dom :returns: generator \"\"\" import pfp.fuzz.rand as rand init() strat =", "= os.path.basename(strat_file) if filename in [\"__init__.py\", \"base.py\"]: continue mod_name = filename.replace(\".py\", \"\").replace(\".pyc\", \"\")", "[] for to_mutate_field in to_mutate: field_strat = strat.get_field_strat(to_mutate_field) if field_strat is not None:", "in that iteration (if ``yield_changed`` is ``True``). It should also be noted that", "it rand_idx = rand.sample(idx_pool, 1)[0] idx_pool.remove(rand_idx) rand_field,field_strat = with_strats[rand_idx] chosen_fields.add(rand_field) field_strat.mutate(rand_field) if yield_changed:", ":param bool yield_changed: Yield a list of fields changed along with the mutated", "six.moves.xrange(min(len(with_strats), at_once)): # we'll never pull the same idx from idx_pool more than", "list of fields changed along with the mutated dom :returns: generator \"\"\" import", "we'll never pull the same idx from idx_pool more than once # since", "rand.sample(idx_pool, 1)[0] idx_pool.remove(rand_idx) rand_field,field_strat = with_strats[rand_idx] chosen_fields.add(rand_field) field_strat.mutate(rand_field) if yield_changed: yield field, chosen_fields", "be anything, not just Dom/Structs) :param strat_name_or_class: Can be the name of a", "strategy specified with ``strat_name_or_class``, yielding ``num`` mutations that affect up to ``at_once`` fields", "that affect up to ``at_once`` fields at once. This function will yield back", "= rand.sample(idx_pool, 1)[0] idx_pool.remove(rand_idx) rand_field,field_strat = with_strats[rand_idx] chosen_fields.add(rand_field) field_strat.mutate(rand_field) if yield_changed: yield field,", "num: The number of mutations to yield :param int at_once: The number of", "or the actual strategy class (not an instance) :param int num: The number", "\"\"\"Mutate the provided field (probably a Dom or struct instance) using the strategy" ]
[ "b[0] = 1 def func2(b): b = b[:] b[0] = 2 def func3(b):", "1 def func2(b): b = b[:] b[0] = 2 def func3(b): b =", "b[0] = 3 l = ['one', 'two', 'three'] func(l[:]) print(\"l1:\", l) func2(l) print(\"l2:\",", "utf-8 -*- def func(b): b[0] = 1 def func2(b): b = b[:] b[0]", "= b.copy() b[0] = 3 l = ['one', 'two', 'three'] func(l[:]) print(\"l1:\", l)", "func(b): b[0] = 1 def func2(b): b = b[:] b[0] = 2 def", "-*- def func(b): b[0] = 1 def func2(b): b = b[:] b[0] =", "b[0] = 2 def func3(b): b = b.copy() b[0] = 3 l =", "def func3(b): b = b.copy() b[0] = 3 l = ['one', 'two', 'three']", "def func(b): b[0] = 1 def func2(b): b = b[:] b[0] = 2", "def func2(b): b = b[:] b[0] = 2 def func3(b): b = b.copy()", "b[:] b[0] = 2 def func3(b): b = b.copy() b[0] = 3 l", "= 3 l = ['one', 'two', 'three'] func(l[:]) print(\"l1:\", l) func2(l) print(\"l2:\", l)", "3 l = ['one', 'two', 'three'] func(l[:]) print(\"l1:\", l) func2(l) print(\"l2:\", l) func3(l)", "func2(b): b = b[:] b[0] = 2 def func3(b): b = b.copy() b[0]", "-*- coding: utf-8 -*- def func(b): b[0] = 1 def func2(b): b =", "l = ['one', 'two', 'three'] func(l[:]) print(\"l1:\", l) func2(l) print(\"l2:\", l) func3(l) print(\"l3:\",", "= 2 def func3(b): b = b.copy() b[0] = 3 l = ['one',", "# -*- coding: utf-8 -*- def func(b): b[0] = 1 def func2(b): b", "<filename>Course/functions/example_19.py # -*- coding: utf-8 -*- def func(b): b[0] = 1 def func2(b):", "b = b[:] b[0] = 2 def func3(b): b = b.copy() b[0] =", "coding: utf-8 -*- def func(b): b[0] = 1 def func2(b): b = b[:]", "func3(b): b = b.copy() b[0] = 3 l = ['one', 'two', 'three'] func(l[:])", "b = b.copy() b[0] = 3 l = ['one', 'two', 'three'] func(l[:]) print(\"l1:\",", "= ['one', 'two', 'three'] func(l[:]) print(\"l1:\", l) func2(l) print(\"l2:\", l) func3(l) print(\"l3:\", l)", "= 1 def func2(b): b = b[:] b[0] = 2 def func3(b): b", "b.copy() b[0] = 3 l = ['one', 'two', 'three'] func(l[:]) print(\"l1:\", l) func2(l)", "2 def func3(b): b = b.copy() b[0] = 3 l = ['one', 'two',", "= b[:] b[0] = 2 def func3(b): b = b.copy() b[0] = 3" ]
[ "import torch def is_pos_def(x): if torch.equal(x, x.t()): try: torch.linalg.cholesky(x) return True except RuntimeError:", "def is_pos_def(x): if torch.equal(x, x.t()): try: torch.linalg.cholesky(x) return True except RuntimeError: return False", "torch def is_pos_def(x): if torch.equal(x, x.t()): try: torch.linalg.cholesky(x) return True except RuntimeError: return", "torch.equal(x, x.t()): try: torch.linalg.cholesky(x) return True except RuntimeError: return False else: return False", "if torch.equal(x, x.t()): try: torch.linalg.cholesky(x) return True except RuntimeError: return False else: return", "is_pos_def(x): if torch.equal(x, x.t()): try: torch.linalg.cholesky(x) return True except RuntimeError: return False else:" ]
[ "pymongo import MongoClient def startConnection(ip = 'localhost',port = 27017,dbname = 'StockDatas'): client =", "= 'localhost',port = 27017,dbname = 'StockDatas'): client = MongoClient(ip,port) # try: # client.admin.command('ismaster')", "available') db = client[dbname] # print(db.collection_names(False)) if db == None: print(\"db not exist\")", "# except Exception as e: # print('Server not available') db = client[dbname] #", "MongoClient(ip,port) # try: # client.admin.command('ismaster') # except Exception as e: # print('Server not", "coding=utf-8 from pymongo import MongoClient def startConnection(ip = 'localhost',port = 27017,dbname = 'StockDatas'):", "# print('Server not available') db = client[dbname] # print(db.collection_names(False)) if db == None:", "not available') db = client[dbname] # print(db.collection_names(False)) if db == None: print(\"db not", "# client.admin.command('ismaster') # except Exception as e: # print('Server not available') db =", "= 27017,dbname = 'StockDatas'): client = MongoClient(ip,port) # try: # client.admin.command('ismaster') # except", "from pymongo import MongoClient def startConnection(ip = 'localhost',port = 27017,dbname = 'StockDatas'): client", "try: # client.admin.command('ismaster') # except Exception as e: # print('Server not available') db", "e: # print('Server not available') db = client[dbname] # print(db.collection_names(False)) if db ==", "import MongoClient def startConnection(ip = 'localhost',port = 27017,dbname = 'StockDatas'): client = MongoClient(ip,port)", "<gh_stars>1-10 #!/usr/bin python # coding=utf-8 from pymongo import MongoClient def startConnection(ip = 'localhost',port", "= 'StockDatas'): client = MongoClient(ip,port) # try: # client.admin.command('ismaster') # except Exception as", "= client[dbname] # print(db.collection_names(False)) if db == None: print(\"db not exist\") else: print(\"connect", "'localhost',port = 27017,dbname = 'StockDatas'): client = MongoClient(ip,port) # try: # client.admin.command('ismaster') #", "#!/usr/bin python # coding=utf-8 from pymongo import MongoClient def startConnection(ip = 'localhost',port =", "client = MongoClient(ip,port) # try: # client.admin.command('ismaster') # except Exception as e: #", "as e: # print('Server not available') db = client[dbname] # print(db.collection_names(False)) if db", "27017,dbname = 'StockDatas'): client = MongoClient(ip,port) # try: # client.admin.command('ismaster') # except Exception", "python # coding=utf-8 from pymongo import MongoClient def startConnection(ip = 'localhost',port = 27017,dbname", "MongoClient def startConnection(ip = 'localhost',port = 27017,dbname = 'StockDatas'): client = MongoClient(ip,port) #", "# print(db.collection_names(False)) if db == None: print(\"db not exist\") else: print(\"connect success\") return", "print(db.collection_names(False)) if db == None: print(\"db not exist\") else: print(\"connect success\") return db", "'StockDatas'): client = MongoClient(ip,port) # try: # client.admin.command('ismaster') # except Exception as e:", "def startConnection(ip = 'localhost',port = 27017,dbname = 'StockDatas'): client = MongoClient(ip,port) # try:", "# try: # client.admin.command('ismaster') # except Exception as e: # print('Server not available')", "startConnection(ip = 'localhost',port = 27017,dbname = 'StockDatas'): client = MongoClient(ip,port) # try: #", "= MongoClient(ip,port) # try: # client.admin.command('ismaster') # except Exception as e: # print('Server", "db = client[dbname] # print(db.collection_names(False)) if db == 
None: print(\"db not exist\") else:", "client[dbname] # print(db.collection_names(False)) if db == None: print(\"db not exist\") else: print(\"connect success\")", "Exception as e: # print('Server not available') db = client[dbname] # print(db.collection_names(False)) if", "print('Server not available') db = client[dbname] # print(db.collection_names(False)) if db == None: print(\"db", "except Exception as e: # print('Server not available') db = client[dbname] # print(db.collection_names(False))", "client.admin.command('ismaster') # except Exception as e: # print('Server not available') db = client[dbname]", "# coding=utf-8 from pymongo import MongoClient def startConnection(ip = 'localhost',port = 27017,dbname =" ]
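A hedged usage sketch; the 'daily_price' collection name is a placeholder that is not part of the snippet. In practice client[dbname] always returns a Database handle (pymongo creates databases lazily on first write), so the db == None branch above never fires:

db = startConnection()             # defaults: localhost:27017, database 'StockDatas'
prices = db['daily_price']         # placeholder collection name
print(prices.count_documents({}))  # 0 until something has been inserted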
[ "def get_object_or_none(klass, *args, **kwargs): \"\"\"Return an object or ``None`` if the object doesn't", "``None`` if the object doesn't exist.\"\"\" queryset = _get_queryset(klass) try: return queryset.get(*args, **kwargs)", "import _get_queryset def get_object_or_none(klass, *args, **kwargs): \"\"\"Return an object or ``None`` if the", "object or ``None`` if the object doesn't exist.\"\"\" queryset = _get_queryset(klass) try: return", "_get_queryset def get_object_or_none(klass, *args, **kwargs): \"\"\"Return an object or ``None`` if the object", "an object or ``None`` if the object doesn't exist.\"\"\" queryset = _get_queryset(klass) try:", "if the object doesn't exist.\"\"\" queryset = _get_queryset(klass) try: return queryset.get(*args, **kwargs) except", "<reponame>moccu/barbeque from django.shortcuts import _get_queryset def get_object_or_none(klass, *args, **kwargs): \"\"\"Return an object or", "*args, **kwargs): \"\"\"Return an object or ``None`` if the object doesn't exist.\"\"\" queryset", "or ``None`` if the object doesn't exist.\"\"\" queryset = _get_queryset(klass) try: return queryset.get(*args,", "the object doesn't exist.\"\"\" queryset = _get_queryset(klass) try: return queryset.get(*args, **kwargs) except queryset.model.DoesNotExist:", "from django.shortcuts import _get_queryset def get_object_or_none(klass, *args, **kwargs): \"\"\"Return an object or ``None``", "**kwargs): \"\"\"Return an object or ``None`` if the object doesn't exist.\"\"\" queryset =", "object doesn't exist.\"\"\" queryset = _get_queryset(klass) try: return queryset.get(*args, **kwargs) except queryset.model.DoesNotExist: return", "get_object_or_none(klass, *args, **kwargs): \"\"\"Return an object or ``None`` if the object doesn't exist.\"\"\"", "django.shortcuts import _get_queryset def get_object_or_none(klass, *args, **kwargs): \"\"\"Return an object or ``None`` if", "doesn't exist.\"\"\" queryset = _get_queryset(klass) try: return queryset.get(*args, **kwargs) except queryset.model.DoesNotExist: return None", "\"\"\"Return an object or ``None`` if the object doesn't exist.\"\"\" queryset = _get_queryset(klass)" ]
[ "return data def create(self, validated_data): user = self.context.get(\"user\") story = Story.objects.create(user=user, **validated_data) return", "validate(self, data): story_url = data.get(\"story_url\", None) story_body_text = data.get(\"story_body_text\", None) if story_url is", "data def create(self, validated_data): user = self.context.get(\"user\") story = Story.objects.create(user=user, **validated_data) return story", "None) if story_url is None and story_body_text is None: raise serializers.ValidationError( \"One of", "\"title\", \"slug\", \"story_url\", \"story_body_text\", \"number_of_comments\", \"number_of_votes\", \"url_domain_name\", \"rank\", \"user\", ] read_only_fields = [", "raise serializers.ValidationError( \"One of story_url or story_body_text is required.\" ) return data def", "= [ \"number_of_comments\", \"number_of_votes\", \"url_domain_name\", \"rank\", \"slug\", ] def validate(self, data): story_url =", "model = Story fields = [ \"id\", \"title\", \"slug\", \"story_url\", \"story_body_text\", \"number_of_comments\", \"number_of_votes\",", "= UserSerializer(read_only=True) class Meta: model = Story fields = [ \"id\", \"title\", \"slug\",", "\"rank\", \"slug\", ] def validate(self, data): story_url = data.get(\"story_url\", None) story_body_text = data.get(\"story_body_text\",", "Meta: model = Story fields = [ \"id\", \"title\", \"slug\", \"story_url\", \"story_body_text\", \"number_of_comments\",", "] def validate(self, data): story_url = data.get(\"story_url\", None) story_body_text = data.get(\"story_body_text\", None) if", "is required.\" ) return data def create(self, validated_data): user = self.context.get(\"user\") story =", "Story class StorySerializer(serializers.ModelSerializer): user = UserSerializer(read_only=True) class Meta: model = Story fields =", ".models import Story class StorySerializer(serializers.ModelSerializer): user = UserSerializer(read_only=True) class Meta: model = Story", "\"number_of_comments\", \"number_of_votes\", \"url_domain_name\", \"rank\", \"slug\", ] def validate(self, data): story_url = data.get(\"story_url\", None)", "\"story_body_text\", \"number_of_comments\", \"number_of_votes\", \"url_domain_name\", \"rank\", \"user\", ] read_only_fields = [ \"number_of_comments\", \"number_of_votes\", \"url_domain_name\",", ") return data def create(self, validated_data): user = self.context.get(\"user\") story = Story.objects.create(user=user, **validated_data)", "from .models import Story class StorySerializer(serializers.ModelSerializer): user = UserSerializer(read_only=True) class Meta: model =", "\"rank\", \"user\", ] read_only_fields = [ \"number_of_comments\", \"number_of_votes\", \"url_domain_name\", \"rank\", \"slug\", ] def", "from accounts.serializers import UserSerializer from .models import Story class StorySerializer(serializers.ModelSerializer): user = UserSerializer(read_only=True)", "import UserSerializer from .models import Story class StorySerializer(serializers.ModelSerializer): user = UserSerializer(read_only=True) class Meta:", "None and story_body_text is None: raise serializers.ValidationError( \"One of story_url or story_body_text is", "None: raise serializers.ValidationError( \"One of story_url or story_body_text is required.\" ) return data", "story_url is None and story_body_text is None: raise serializers.ValidationError( \"One of story_url or", "data): story_url = data.get(\"story_url\", None) story_body_text = data.get(\"story_body_text\", None) if story_url is None", "= data.get(\"story_url\", None) 
story_body_text = data.get(\"story_body_text\", None) if story_url is None and story_body_text", "if story_url is None and story_body_text is None: raise serializers.ValidationError( \"One of story_url", "UserSerializer from .models import Story class StorySerializer(serializers.ModelSerializer): user = UserSerializer(read_only=True) class Meta: model", "fields = [ \"id\", \"title\", \"slug\", \"story_url\", \"story_body_text\", \"number_of_comments\", \"number_of_votes\", \"url_domain_name\", \"rank\", \"user\",", "required.\" ) return data def create(self, validated_data): user = self.context.get(\"user\") story = Story.objects.create(user=user,", "data.get(\"story_body_text\", None) if story_url is None and story_body_text is None: raise serializers.ValidationError( \"One", "\"url_domain_name\", \"rank\", \"slug\", ] def validate(self, data): story_url = data.get(\"story_url\", None) story_body_text =", "\"number_of_votes\", \"url_domain_name\", \"rank\", \"slug\", ] def validate(self, data): story_url = data.get(\"story_url\", None) story_body_text", "story_body_text = data.get(\"story_body_text\", None) if story_url is None and story_body_text is None: raise", "\"One of story_url or story_body_text is required.\" ) return data def create(self, validated_data):", "] read_only_fields = [ \"number_of_comments\", \"number_of_votes\", \"url_domain_name\", \"rank\", \"slug\", ] def validate(self, data):", "and story_body_text is None: raise serializers.ValidationError( \"One of story_url or story_body_text is required.\"", "or story_body_text is required.\" ) return data def create(self, validated_data): user = self.context.get(\"user\")", "UserSerializer(read_only=True) class Meta: model = Story fields = [ \"id\", \"title\", \"slug\", \"story_url\",", "story_url or story_body_text is required.\" ) return data def create(self, validated_data): user =", "= Story fields = [ \"id\", \"title\", \"slug\", \"story_url\", \"story_body_text\", \"number_of_comments\", \"number_of_votes\", \"url_domain_name\",", "def validate(self, data): story_url = data.get(\"story_url\", None) story_body_text = data.get(\"story_body_text\", None) if story_url", "story_body_text is None: raise serializers.ValidationError( \"One of story_url or story_body_text is required.\" )", "\"slug\", ] def validate(self, data): story_url = data.get(\"story_url\", None) story_body_text = data.get(\"story_body_text\", None)", "import serializers from accounts.serializers import UserSerializer from .models import Story class StorySerializer(serializers.ModelSerializer): user", "[ \"id\", \"title\", \"slug\", \"story_url\", \"story_body_text\", \"number_of_comments\", \"number_of_votes\", \"url_domain_name\", \"rank\", \"user\", ] read_only_fields", "of story_url or story_body_text is required.\" ) return data def create(self, validated_data): user", "class StorySerializer(serializers.ModelSerializer): user = UserSerializer(read_only=True) class Meta: model = Story fields = [", "\"slug\", \"story_url\", \"story_body_text\", \"number_of_comments\", \"number_of_votes\", \"url_domain_name\", \"rank\", \"user\", ] read_only_fields = [ \"number_of_comments\",", "read_only_fields = [ \"number_of_comments\", \"number_of_votes\", \"url_domain_name\", \"rank\", \"slug\", ] def validate(self, data): story_url", "serializers.ValidationError( \"One of story_url or story_body_text is required.\" ) return data def create(self,", "[ \"number_of_comments\", \"number_of_votes\", \"url_domain_name\", \"rank\", \"slug\", ] def validate(self, data): 
story_url = data.get(\"story_url\",", "None) story_body_text = data.get(\"story_body_text\", None) if story_url is None and story_body_text is None:", "\"story_url\", \"story_body_text\", \"number_of_comments\", \"number_of_votes\", \"url_domain_name\", \"rank\", \"user\", ] read_only_fields = [ \"number_of_comments\", \"number_of_votes\",", "is None and story_body_text is None: raise serializers.ValidationError( \"One of story_url or story_body_text", "serializers from accounts.serializers import UserSerializer from .models import Story class StorySerializer(serializers.ModelSerializer): user =", "user = UserSerializer(read_only=True) class Meta: model = Story fields = [ \"id\", \"title\",", "\"url_domain_name\", \"rank\", \"user\", ] read_only_fields = [ \"number_of_comments\", \"number_of_votes\", \"url_domain_name\", \"rank\", \"slug\", ]", "accounts.serializers import UserSerializer from .models import Story class StorySerializer(serializers.ModelSerializer): user = UserSerializer(read_only=True) class", "is None: raise serializers.ValidationError( \"One of story_url or story_body_text is required.\" ) return", "import Story class StorySerializer(serializers.ModelSerializer): user = UserSerializer(read_only=True) class Meta: model = Story fields", "\"id\", \"title\", \"slug\", \"story_url\", \"story_body_text\", \"number_of_comments\", \"number_of_votes\", \"url_domain_name\", \"rank\", \"user\", ] read_only_fields =", "Story fields = [ \"id\", \"title\", \"slug\", \"story_url\", \"story_body_text\", \"number_of_comments\", \"number_of_votes\", \"url_domain_name\", \"rank\",", "= [ \"id\", \"title\", \"slug\", \"story_url\", \"story_body_text\", \"number_of_comments\", \"number_of_votes\", \"url_domain_name\", \"rank\", \"user\", ]", "data.get(\"story_url\", None) story_body_text = data.get(\"story_body_text\", None) if story_url is None and story_body_text is", "\"number_of_votes\", \"url_domain_name\", \"rank\", \"user\", ] read_only_fields = [ \"number_of_comments\", \"number_of_votes\", \"url_domain_name\", \"rank\", \"slug\",", "\"user\", ] read_only_fields = [ \"number_of_comments\", \"number_of_votes\", \"url_domain_name\", \"rank\", \"slug\", ] def validate(self,", "story_body_text is required.\" ) return data def create(self, validated_data): user = self.context.get(\"user\") story", "from rest_framework import serializers from accounts.serializers import UserSerializer from .models import Story class", "= data.get(\"story_body_text\", None) if story_url is None and story_body_text is None: raise serializers.ValidationError(", "class Meta: model = Story fields = [ \"id\", \"title\", \"slug\", \"story_url\", \"story_body_text\",", "story_url = data.get(\"story_url\", None) story_body_text = data.get(\"story_body_text\", None) if story_url is None and", "rest_framework import serializers from accounts.serializers import UserSerializer from .models import Story class StorySerializer(serializers.ModelSerializer):", "\"number_of_comments\", \"number_of_votes\", \"url_domain_name\", \"rank\", \"user\", ] read_only_fields = [ \"number_of_comments\", \"number_of_votes\", \"url_domain_name\", \"rank\",", "StorySerializer(serializers.ModelSerializer): user = UserSerializer(read_only=True) class Meta: model = Story fields = [ \"id\"," ]
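A sketch of how a view might drive this serializer; the payload values and the request object are illustrative, but the "user" context key matches what create() reads above:

# inside a DRF view, where `request` is the incoming request
serializer = StorySerializer(
    data={"title": "Show HN: my project", "story_url": "https://example.com"},
    context={"user": request.user},
)
serializer.is_valid(raise_exception=True)  # runs validate(), enforcing url-or-body
story = serializer.save()                  # dispatches to create() with the context user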
[ "range(len(labels)): img=img_path+\"\\\\\"+labels[i]+\".jpg\" product,taste,weight,package=get_chinese(refer_166classes,labels[i]) file.add_picture(img) file.add_paragraph(product+taste+weight+package+\" \"+labels[i]) file.add_paragraph(\"\\n\") file.save(parent_path+\"\\\\\"+\"本批商品图例.doc\") #这是生成的word文档的名字 def find_imgs_and_save_as_imgs(labels, parent_path): io_utils.mkdir(this_batch_imgs_path)", "Image import os import xlwt import io_utils from PIL import ImageDraw from PIL", "@File : get_you_need_label_img.py # @Software: ZJ_AI # this code is for read some", "\"black\" draw.text((10, height - 100), u\"\\\"\"+labels[i]+\"\\\"\", font=setFont, fill=fillColor) draw.text((10, height - 50), u\"\\\"\"", "file.add_picture(img) file.add_paragraph(product+taste+weight+package+\" \"+labels[i]) file.add_paragraph(\"\\n\") file.save(parent_path+\"\\\\\"+\"本批商品图例.doc\") #这是生成的word文档的名字 def find_imgs_and_save_as_imgs(labels, parent_path): io_utils.mkdir(this_batch_imgs_path) for i in", "is for read some labels from excel and find according imgs and put", "from PIL import ImageDraw from PIL import ImageFont #some paths parent_path=\"C:\\\\Users\\\\Administrator\\\\Desktop\\\\data_processing_carriechen\\\\count_all_annotations\" excel_path=parent_path+\"\\\\本批商品列表.xls\" img_path=parent_path+\"\\\\pic+lab166\"", "def find_imgs_and_write_word(labels,parent_path): file=docx.Document() for i in range(len(labels)): img=img_path+\"\\\\\"+labels[i]+\".jpg\" product,taste,weight,package=get_chinese(refer_166classes,labels[i]) file.add_picture(img) file.add_paragraph(product+taste+weight+package+\" \"+labels[i]) file.add_paragraph(\"\\n\")", "file.add_paragraph(\"\\n\") file.save(parent_path+\"\\\\\"+\"本批商品图例.doc\") #这是生成的word文档的名字 def find_imgs_and_save_as_imgs(labels, parent_path): io_utils.mkdir(this_batch_imgs_path) for i in range(len(labels)): background =", "@Software: ZJ_AI # this code is for read some labels from excel and", "some labels from excel and find according imgs and put the imgs into", "range(len(labels)): background = Image.open(\"C:\\\\Users\\\\Administrator\\\\Desktop\\\\data_processing_carriechen\\\\count_all_annotations\\\\pure_white_background.jpg\") img = img_path + \"\\\\\" + labels[i] + \".jpg\"", "= img_path + \"\\\\\" + labels[i] + \".jpg\" product, taste, weight, package =", "@Time : 5/24/2018 # @Author : CarrieChen # @File : get_you_need_label_img.py # @Software:", "if pointlabel in labels: #else row=labels.index(pointlabel) product=table.cell(row,0).value taste = table.cell(row, 1).value weight=table.cell(row,2).value package=table.cell(row,3).value", "import ImageDraw from PIL import ImageFont #some paths parent_path=\"C:\\\\Users\\\\Administrator\\\\Desktop\\\\data_processing_carriechen\\\\count_all_annotations\" excel_path=parent_path+\"\\\\本批商品列表.xls\" img_path=parent_path+\"\\\\pic+lab166\" refer_166classes=parent_path+\"\\\\166_classes_list.xls\" this_batch_imgs_path=parent_path+\"\\\\本批商品图例\"", "background = Image.open(\"C:\\\\Users\\\\Administrator\\\\Desktop\\\\data_processing_carriechen\\\\count_all_annotations\\\\pure_white_background.jpg\") img = img_path + \"\\\\\" + labels[i] + \".jpg\" product,", "img=img_path+\"\\\\\"+labels[i]+\".jpg\" product,taste,weight,package=get_chinese(refer_166classes,labels[i]) file.add_picture(img) file.add_paragraph(product+taste+weight+package+\" \"+labels[i]) file.add_paragraph(\"\\n\") file.save(parent_path+\"\\\\\"+\"本批商品图例.doc\") #这是生成的word文档的名字 def find_imgs_and_save_as_imgs(labels, parent_path): 
io_utils.mkdir(this_batch_imgs_path) for", "-*- # @Time : 5/24/2018 # @Author : CarrieChen # @File : get_you_need_label_img.py", "- 50), u\"\\\"\" + product+taste+weight+package + \"\\\"\", font=setFont, fill=fillColor) background.save(this_batch_imgs_path+\"\\\\\"+labels[i]+\".jpg\") if __name__ ==\"__main__\":", "= table.cell(row, 1).value weight=table.cell(row,2).value package=table.cell(row,3).value return product,taste,weight,package def find_imgs_and_write_word(labels,parent_path): file=docx.Document() for i in", "@Author : CarrieChen # @File : get_you_need_label_img.py # @Software: ZJ_AI # this code", "get_you_need_label_img.py # @Software: ZJ_AI # this code is for read some labels from", "# this code is for read some labels from excel and find according", "the imgs into a word. import xlrd import docx from PIL import Image", "product, taste, weight, package = get_chinese(refer_166classes, labels[i]) img=Image.open(img) background.paste(img,[100,50]) draw = ImageDraw.Draw(background) width,", "<filename>development/server/algorithm/tf_faster_rcnn/data_processing/small_tools/get_you_need_label_img.py<gh_stars>0 # -*- coding: utf-8 -*- # @Time : 5/24/2018 # @Author :", "fill=fillColor) draw.text((10, height - 50), u\"\\\"\" + product+taste+weight+package + \"\\\"\", font=setFont, fill=fillColor) background.save(this_batch_imgs_path+\"\\\\\"+labels[i]+\".jpg\")", "and find according imgs and put the imgs into a word. import xlrd", "refer_166classes=parent_path+\"\\\\166_classes_list.xls\" this_batch_imgs_path=parent_path+\"\\\\本批商品图例\" def get_labels(input_path): data=xlrd.open_workbook(input_path) table=data.sheets()[0] labels=table.col_values(0) return labels def get_chinese(input_path,pointlabel): #excel data=xlrd.open_workbook(input_path)", "Image.open(\"C:\\\\Users\\\\Administrator\\\\Desktop\\\\data_processing_carriechen\\\\count_all_annotations\\\\pure_white_background.jpg\") img = img_path + \"\\\\\" + labels[i] + \".jpg\" product, taste, weight,", "# @Software: ZJ_AI # this code is for read some labels from excel", "PIL import ImageDraw from PIL import ImageFont #some paths parent_path=\"C:\\\\Users\\\\Administrator\\\\Desktop\\\\data_processing_carriechen\\\\count_all_annotations\" excel_path=parent_path+\"\\\\本批商品列表.xls\" img_path=parent_path+\"\\\\pic+lab166\" refer_166classes=parent_path+\"\\\\166_classes_list.xls\"", "#这是生成的word文档的名字 def find_imgs_and_save_as_imgs(labels, parent_path): io_utils.mkdir(this_batch_imgs_path) for i in range(len(labels)): background = Image.open(\"C:\\\\Users\\\\Administrator\\\\Desktop\\\\data_processing_carriechen\\\\count_all_annotations\\\\pure_white_background.jpg\") img", "- 100), u\"\\\"\"+labels[i]+\"\\\"\", font=setFont, fill=fillColor) draw.text((10, height - 50), u\"\\\"\" + product+taste+weight+package +", "5/24/2018 # @Author : CarrieChen # @File : get_you_need_label_img.py # @Software: ZJ_AI #", "table=data.sheets()[0] labels=table.col_values(4) if pointlabel in labels: #else row=labels.index(pointlabel) product=table.cell(row,0).value taste = table.cell(row, 1).value", "draw = ImageDraw.Draw(background) width, height = background.size setFont = ImageFont.truetype('C:\\Windows\\Fonts\\\\simfang.ttf', 30) fillColor =", "this_batch_imgs_path=parent_path+\"\\\\本批商品图例\" def get_labels(input_path): data=xlrd.open_workbook(input_path) table=data.sheets()[0] labels=table.col_values(0) return labels def get_chinese(input_path,pointlabel): #excel data=xlrd.open_workbook(input_path) table=data.sheets()[0]", "for read some labels from excel 
and find according imgs and put the", "font=setFont, fill=fillColor) draw.text((10, height - 50), u\"\\\"\" + product+taste+weight+package + \"\\\"\", font=setFont, fill=fillColor)", "word. import xlrd import docx from PIL import Image import os import xlwt", "def get_chinese(input_path,pointlabel): #excel data=xlrd.open_workbook(input_path) table=data.sheets()[0] labels=table.col_values(4) if pointlabel in labels: #else row=labels.index(pointlabel) product=table.cell(row,0).value", "+ product+taste+weight+package + \"\\\"\", font=setFont, fill=fillColor) background.save(this_batch_imgs_path+\"\\\\\"+labels[i]+\".jpg\") if __name__ ==\"__main__\": labels=get_labels(excel_path) #find_imgs_and_write_word(labels,parent_path) find_imgs_and_save_as_imgs(labels,parent_path)", "img_path + \"\\\\\" + labels[i] + \".jpg\" product, taste, weight, package = get_chinese(refer_166classes,", "into a word. import xlrd import docx from PIL import Image import os", ": get_you_need_label_img.py # @Software: ZJ_AI # this code is for read some labels", "package=table.cell(row,3).value return product,taste,weight,package def find_imgs_and_write_word(labels,parent_path): file=docx.Document() for i in range(len(labels)): img=img_path+\"\\\\\"+labels[i]+\".jpg\" product,taste,weight,package=get_chinese(refer_166classes,labels[i]) file.add_picture(img)", "taste, weight, package = get_chinese(refer_166classes, labels[i]) img=Image.open(img) background.paste(img,[100,50]) draw = ImageDraw.Draw(background) width, height", "product=table.cell(row,0).value taste = table.cell(row, 1).value weight=table.cell(row,2).value package=table.cell(row,3).value return product,taste,weight,package def find_imgs_and_write_word(labels,parent_path): file=docx.Document() for", "height - 100), u\"\\\"\"+labels[i]+\"\\\"\", font=setFont, fill=fillColor) draw.text((10, height - 50), u\"\\\"\" + product+taste+weight+package", "taste = table.cell(row, 1).value weight=table.cell(row,2).value package=table.cell(row,3).value return product,taste,weight,package def find_imgs_and_write_word(labels,parent_path): file=docx.Document() for i", "50), u\"\\\"\" + product+taste+weight+package + \"\\\"\", font=setFont, fill=fillColor) background.save(this_batch_imgs_path+\"\\\\\"+labels[i]+\".jpg\") if __name__ ==\"__main__\": labels=get_labels(excel_path)", "docx from PIL import Image import os import xlwt import io_utils from PIL", "read some labels from excel and find according imgs and put the imgs", "+ labels[i] + \".jpg\" product, taste, weight, package = get_chinese(refer_166classes, labels[i]) img=Image.open(img) background.paste(img,[100,50])", "get_chinese(input_path,pointlabel): #excel data=xlrd.open_workbook(input_path) table=data.sheets()[0] labels=table.col_values(4) if pointlabel in labels: #else row=labels.index(pointlabel) product=table.cell(row,0).value taste", "excel and find according imgs and put the imgs into a word. 
import", "parent_path=\"C:\\\\Users\\\\Administrator\\\\Desktop\\\\data_processing_carriechen\\\\count_all_annotations\" excel_path=parent_path+\"\\\\本批商品列表.xls\" img_path=parent_path+\"\\\\pic+lab166\" refer_166classes=parent_path+\"\\\\166_classes_list.xls\" this_batch_imgs_path=parent_path+\"\\\\本批商品图例\" def get_labels(input_path): data=xlrd.open_workbook(input_path) table=data.sheets()[0] labels=table.col_values(0) return labels def", "i in range(len(labels)): img=img_path+\"\\\\\"+labels[i]+\".jpg\" product,taste,weight,package=get_chinese(refer_166classes,labels[i]) file.add_picture(img) file.add_paragraph(product+taste+weight+package+\" \"+labels[i]) file.add_paragraph(\"\\n\") file.save(parent_path+\"\\\\\"+\"本批商品图例.doc\") #这是生成的word文档的名字 def find_imgs_and_save_as_imgs(labels,", "data=xlrd.open_workbook(input_path) table=data.sheets()[0] labels=table.col_values(4) if pointlabel in labels: #else row=labels.index(pointlabel) product=table.cell(row,0).value taste = table.cell(row,", "1).value weight=table.cell(row,2).value package=table.cell(row,3).value return product,taste,weight,package def find_imgs_and_write_word(labels,parent_path): file=docx.Document() for i in range(len(labels)): img=img_path+\"\\\\\"+labels[i]+\".jpg\"", "import xlrd import docx from PIL import Image import os import xlwt import", "pointlabel in labels: #else row=labels.index(pointlabel) product=table.cell(row,0).value taste = table.cell(row, 1).value weight=table.cell(row,2).value package=table.cell(row,3).value return", "code is for read some labels from excel and find according imgs and", "ImageFont #some paths parent_path=\"C:\\\\Users\\\\Administrator\\\\Desktop\\\\data_processing_carriechen\\\\count_all_annotations\" excel_path=parent_path+\"\\\\本批商品列表.xls\" img_path=parent_path+\"\\\\pic+lab166\" refer_166classes=parent_path+\"\\\\166_classes_list.xls\" this_batch_imgs_path=parent_path+\"\\\\本批商品图例\" def get_labels(input_path): data=xlrd.open_workbook(input_path) table=data.sheets()[0] labels=table.col_values(0)", "\".jpg\" product, taste, weight, package = get_chinese(refer_166classes, labels[i]) img=Image.open(img) background.paste(img,[100,50]) draw = ImageDraw.Draw(background)", "os import xlwt import io_utils from PIL import ImageDraw from PIL import ImageFont", "row=labels.index(pointlabel) product=table.cell(row,0).value taste = table.cell(row, 1).value weight=table.cell(row,2).value package=table.cell(row,3).value return product,taste,weight,package def find_imgs_and_write_word(labels,parent_path): file=docx.Document()", "labels[i]) img=Image.open(img) background.paste(img,[100,50]) draw = ImageDraw.Draw(background) width, height = background.size setFont = ImageFont.truetype('C:\\Windows\\Fonts\\\\simfang.ttf',", "background.paste(img,[100,50]) draw = ImageDraw.Draw(background) width, height = background.size setFont = ImageFont.truetype('C:\\Windows\\Fonts\\\\simfang.ttf', 30) fillColor", "table.cell(row, 1).value weight=table.cell(row,2).value package=table.cell(row,3).value return product,taste,weight,package def find_imgs_and_write_word(labels,parent_path): file=docx.Document() for i in range(len(labels)):", "# @File : get_you_need_label_img.py # @Software: ZJ_AI # this code is for read", "PIL import Image import os import xlwt import io_utils from PIL import ImageDraw", "io_utils from PIL import ImageDraw from PIL import ImageFont #some paths parent_path=\"C:\\\\Users\\\\Administrator\\\\Desktop\\\\data_processing_carriechen\\\\count_all_annotations\" 
excel_path=parent_path+\"\\\\本批商品列表.xls\"", "file.add_paragraph(product+taste+weight+package+\" \"+labels[i]) file.add_paragraph(\"\\n\") file.save(parent_path+\"\\\\\"+\"本批商品图例.doc\") #这是生成的word文档的名字 def find_imgs_and_save_as_imgs(labels, parent_path): io_utils.mkdir(this_batch_imgs_path) for i in range(len(labels)):", "u\"\\\"\" + product+taste+weight+package + \"\\\"\", font=setFont, fill=fillColor) background.save(this_batch_imgs_path+\"\\\\\"+labels[i]+\".jpg\") if __name__ ==\"__main__\": labels=get_labels(excel_path) #find_imgs_and_write_word(labels,parent_path)", "paths parent_path=\"C:\\\\Users\\\\Administrator\\\\Desktop\\\\data_processing_carriechen\\\\count_all_annotations\" excel_path=parent_path+\"\\\\本批商品列表.xls\" img_path=parent_path+\"\\\\pic+lab166\" refer_166classes=parent_path+\"\\\\166_classes_list.xls\" this_batch_imgs_path=parent_path+\"\\\\本批商品图例\" def get_labels(input_path): data=xlrd.open_workbook(input_path) table=data.sheets()[0] labels=table.col_values(0) return labels", ": CarrieChen # @File : get_you_need_label_img.py # @Software: ZJ_AI # this code is", "table=data.sheets()[0] labels=table.col_values(0) return labels def get_chinese(input_path,pointlabel): #excel data=xlrd.open_workbook(input_path) table=data.sheets()[0] labels=table.col_values(4) if pointlabel in", "labels=table.col_values(0) return labels def get_chinese(input_path,pointlabel): #excel data=xlrd.open_workbook(input_path) table=data.sheets()[0] labels=table.col_values(4) if pointlabel in labels:", "coding: utf-8 -*- # @Time : 5/24/2018 # @Author : CarrieChen # @File", "product,taste,weight,package def find_imgs_and_write_word(labels,parent_path): file=docx.Document() for i in range(len(labels)): img=img_path+\"\\\\\"+labels[i]+\".jpg\" product,taste,weight,package=get_chinese(refer_166classes,labels[i]) file.add_picture(img) file.add_paragraph(product+taste+weight+package+\" \"+labels[i])", "from PIL import ImageFont #some paths parent_path=\"C:\\\\Users\\\\Administrator\\\\Desktop\\\\data_processing_carriechen\\\\count_all_annotations\" excel_path=parent_path+\"\\\\本批商品列表.xls\" img_path=parent_path+\"\\\\pic+lab166\" refer_166classes=parent_path+\"\\\\166_classes_list.xls\" this_batch_imgs_path=parent_path+\"\\\\本批商品图例\" def get_labels(input_path):", "for i in range(len(labels)): background = Image.open(\"C:\\\\Users\\\\Administrator\\\\Desktop\\\\data_processing_carriechen\\\\count_all_annotations\\\\pure_white_background.jpg\") img = img_path + \"\\\\\" +", "product,taste,weight,package=get_chinese(refer_166classes,labels[i]) file.add_picture(img) file.add_paragraph(product+taste+weight+package+\" \"+labels[i]) file.add_paragraph(\"\\n\") file.save(parent_path+\"\\\\\"+\"本批商品图例.doc\") #这是生成的word文档的名字 def find_imgs_and_save_as_imgs(labels, parent_path): io_utils.mkdir(this_batch_imgs_path) for i", "= Image.open(\"C:\\\\Users\\\\Administrator\\\\Desktop\\\\data_processing_carriechen\\\\count_all_annotations\\\\pure_white_background.jpg\") img = img_path + \"\\\\\" + labels[i] + \".jpg\" product, taste,", "PIL import ImageFont #some paths parent_path=\"C:\\\\Users\\\\Administrator\\\\Desktop\\\\data_processing_carriechen\\\\count_all_annotations\" excel_path=parent_path+\"\\\\本批商品列表.xls\" img_path=parent_path+\"\\\\pic+lab166\" refer_166classes=parent_path+\"\\\\166_classes_list.xls\" this_batch_imgs_path=parent_path+\"\\\\本批商品图例\" def get_labels(input_path): data=xlrd.open_workbook(input_path)", "in labels: #else row=labels.index(pointlabel) product=table.cell(row,0).value taste = 
table.cell(row, 1).value weight=table.cell(row,2).value package=table.cell(row,3).value return product,taste,weight,package", "#excel data=xlrd.open_workbook(input_path) table=data.sheets()[0] labels=table.col_values(4) if pointlabel in labels: #else row=labels.index(pointlabel) product=table.cell(row,0).value taste =", "import Image import os import xlwt import io_utils from PIL import ImageDraw from", "draw.text((10, height - 50), u\"\\\"\" + product+taste+weight+package + \"\\\"\", font=setFont, fill=fillColor) background.save(this_batch_imgs_path+\"\\\\\"+labels[i]+\".jpg\") if", "labels: #else row=labels.index(pointlabel) product=table.cell(row,0).value taste = table.cell(row, 1).value weight=table.cell(row,2).value package=table.cell(row,3).value return product,taste,weight,package def", "+ \".jpg\" product, taste, weight, package = get_chinese(refer_166classes, labels[i]) img=Image.open(img) background.paste(img,[100,50]) draw =", "excel_path=parent_path+\"\\\\本批商品列表.xls\" img_path=parent_path+\"\\\\pic+lab166\" refer_166classes=parent_path+\"\\\\166_classes_list.xls\" this_batch_imgs_path=parent_path+\"\\\\本批商品图例\" def get_labels(input_path): data=xlrd.open_workbook(input_path) table=data.sheets()[0] labels=table.col_values(0) return labels def get_chinese(input_path,pointlabel):", "find_imgs_and_write_word(labels,parent_path): file=docx.Document() for i in range(len(labels)): img=img_path+\"\\\\\"+labels[i]+\".jpg\" product,taste,weight,package=get_chinese(refer_166classes,labels[i]) file.add_picture(img) file.add_paragraph(product+taste+weight+package+\" \"+labels[i]) file.add_paragraph(\"\\n\") file.save(parent_path+\"\\\\\"+\"本批商品图例.doc\")", "in range(len(labels)): background = Image.open(\"C:\\\\Users\\\\Administrator\\\\Desktop\\\\data_processing_carriechen\\\\count_all_annotations\\\\pure_white_background.jpg\") img = img_path + \"\\\\\" + labels[i] +", "\"+labels[i]) file.add_paragraph(\"\\n\") file.save(parent_path+\"\\\\\"+\"本批商品图例.doc\") #这是生成的word文档的名字 def find_imgs_and_save_as_imgs(labels, parent_path): io_utils.mkdir(this_batch_imgs_path) for i in range(len(labels)): background", "return product,taste,weight,package def find_imgs_and_write_word(labels,parent_path): file=docx.Document() for i in range(len(labels)): img=img_path+\"\\\\\"+labels[i]+\".jpg\" product,taste,weight,package=get_chinese(refer_166classes,labels[i]) file.add_picture(img) file.add_paragraph(product+taste+weight+package+\"", "this code is for read some labels from excel and find according imgs", "= background.size setFont = ImageFont.truetype('C:\\Windows\\Fonts\\\\simfang.ttf', 30) fillColor = \"black\" draw.text((10, height - 100),", "put the imgs into a word. import xlrd import docx from PIL import", "+ \"\\\\\" + labels[i] + \".jpg\" product, taste, weight, package = get_chinese(refer_166classes, labels[i])", "imgs into a word. 
import xlrd import docx from PIL import Image import", "# @Time : 5/24/2018 # @Author : CarrieChen # @File : get_you_need_label_img.py #", "import io_utils from PIL import ImageDraw from PIL import ImageFont #some paths parent_path=\"C:\\\\Users\\\\Administrator\\\\Desktop\\\\data_processing_carriechen\\\\count_all_annotations\"", "CarrieChen # @File : get_you_need_label_img.py # @Software: ZJ_AI # this code is for", "\"\\\\\" + labels[i] + \".jpg\" product, taste, weight, package = get_chinese(refer_166classes, labels[i]) img=Image.open(img)", "fillColor = \"black\" draw.text((10, height - 100), u\"\\\"\"+labels[i]+\"\\\"\", font=setFont, fill=fillColor) draw.text((10, height -", "imgs and put the imgs into a word. import xlrd import docx from", "= get_chinese(refer_166classes, labels[i]) img=Image.open(img) background.paste(img,[100,50]) draw = ImageDraw.Draw(background) width, height = background.size setFont", "labels def get_chinese(input_path,pointlabel): #excel data=xlrd.open_workbook(input_path) table=data.sheets()[0] labels=table.col_values(4) if pointlabel in labels: #else row=labels.index(pointlabel)", "in range(len(labels)): img=img_path+\"\\\\\"+labels[i]+\".jpg\" product,taste,weight,package=get_chinese(refer_166classes,labels[i]) file.add_picture(img) file.add_paragraph(product+taste+weight+package+\" \"+labels[i]) file.add_paragraph(\"\\n\") file.save(parent_path+\"\\\\\"+\"本批商品图例.doc\") #这是生成的word文档的名字 def find_imgs_and_save_as_imgs(labels, parent_path):", "def find_imgs_and_save_as_imgs(labels, parent_path): io_utils.mkdir(this_batch_imgs_path) for i in range(len(labels)): background = Image.open(\"C:\\\\Users\\\\Administrator\\\\Desktop\\\\data_processing_carriechen\\\\count_all_annotations\\\\pure_white_background.jpg\") img =", "= ImageDraw.Draw(background) width, height = background.size setFont = ImageFont.truetype('C:\\Windows\\Fonts\\\\simfang.ttf', 30) fillColor = \"black\"", "from excel and find according imgs and put the imgs into a word.", "return labels def get_chinese(input_path,pointlabel): #excel data=xlrd.open_workbook(input_path) table=data.sheets()[0] labels=table.col_values(4) if pointlabel in labels: #else", "ImageDraw from PIL import ImageFont #some paths parent_path=\"C:\\\\Users\\\\Administrator\\\\Desktop\\\\data_processing_carriechen\\\\count_all_annotations\" excel_path=parent_path+\"\\\\本批商品列表.xls\" img_path=parent_path+\"\\\\pic+lab166\" refer_166classes=parent_path+\"\\\\166_classes_list.xls\" this_batch_imgs_path=parent_path+\"\\\\本批商品图例\" def", "xlwt import io_utils from PIL import ImageDraw from PIL import ImageFont #some paths", "labels=table.col_values(4) if pointlabel in labels: #else row=labels.index(pointlabel) product=table.cell(row,0).value taste = table.cell(row, 1).value weight=table.cell(row,2).value", "file=docx.Document() for i in range(len(labels)): img=img_path+\"\\\\\"+labels[i]+\".jpg\" product,taste,weight,package=get_chinese(refer_166classes,labels[i]) file.add_picture(img) file.add_paragraph(product+taste+weight+package+\" \"+labels[i]) file.add_paragraph(\"\\n\") file.save(parent_path+\"\\\\\"+\"本批商品图例.doc\") #这是生成的word文档的名字", "for i in range(len(labels)): img=img_path+\"\\\\\"+labels[i]+\".jpg\" product,taste,weight,package=get_chinese(refer_166classes,labels[i]) file.add_picture(img) file.add_paragraph(product+taste+weight+package+\" \"+labels[i]) file.add_paragraph(\"\\n\") file.save(parent_path+\"\\\\\"+\"本批商品图例.doc\") #这是生成的word文档的名字 def", "height = background.size setFont = 
ImageFont.truetype('C:\\Windows\\Fonts\\\\simfang.ttf', 30) fillColor = \"black\" draw.text((10, height -", "-*- coding: utf-8 -*- # @Time : 5/24/2018 # @Author : CarrieChen #", "io_utils.mkdir(this_batch_imgs_path) for i in range(len(labels)): background = Image.open(\"C:\\\\Users\\\\Administrator\\\\Desktop\\\\data_processing_carriechen\\\\count_all_annotations\\\\pure_white_background.jpg\") img = img_path + \"\\\\\"", "background.size setFont = ImageFont.truetype('C:\\Windows\\Fonts\\\\simfang.ttf', 30) fillColor = \"black\" draw.text((10, height - 100), u\"\\\"\"+labels[i]+\"\\\"\",", "a word. import xlrd import docx from PIL import Image import os import", "ImageDraw.Draw(background) width, height = background.size setFont = ImageFont.truetype('C:\\Windows\\Fonts\\\\simfang.ttf', 30) fillColor = \"black\" draw.text((10,", "width, height = background.size setFont = ImageFont.truetype('C:\\Windows\\Fonts\\\\simfang.ttf', 30) fillColor = \"black\" draw.text((10, height", "= ImageFont.truetype('C:\\Windows\\Fonts\\\\simfang.ttf', 30) fillColor = \"black\" draw.text((10, height - 100), u\"\\\"\"+labels[i]+\"\\\"\", font=setFont, fill=fillColor)", "ZJ_AI # this code is for read some labels from excel and find", "get_chinese(refer_166classes, labels[i]) img=Image.open(img) background.paste(img,[100,50]) draw = ImageDraw.Draw(background) width, height = background.size setFont =", "#some paths parent_path=\"C:\\\\Users\\\\Administrator\\\\Desktop\\\\data_processing_carriechen\\\\count_all_annotations\" excel_path=parent_path+\"\\\\本批商品列表.xls\" img_path=parent_path+\"\\\\pic+lab166\" refer_166classes=parent_path+\"\\\\166_classes_list.xls\" this_batch_imgs_path=parent_path+\"\\\\本批商品图例\" def get_labels(input_path): data=xlrd.open_workbook(input_path) table=data.sheets()[0] labels=table.col_values(0) return", "import docx from PIL import Image import os import xlwt import io_utils from", "xlrd import docx from PIL import Image import os import xlwt import io_utils", "img = img_path + \"\\\\\" + labels[i] + \".jpg\" product, taste, weight, package", "img=Image.open(img) background.paste(img,[100,50]) draw = ImageDraw.Draw(background) width, height = background.size setFont = ImageFont.truetype('C:\\Windows\\Fonts\\\\simfang.ttf', 30)", "height - 50), u\"\\\"\" + product+taste+weight+package + \"\\\"\", font=setFont, fill=fillColor) background.save(this_batch_imgs_path+\"\\\\\"+labels[i]+\".jpg\") if __name__", "draw.text((10, height - 100), u\"\\\"\"+labels[i]+\"\\\"\", font=setFont, fill=fillColor) draw.text((10, height - 50), u\"\\\"\" +", "# @Author : CarrieChen # @File : get_you_need_label_img.py # @Software: ZJ_AI # this", "img_path=parent_path+\"\\\\pic+lab166\" refer_166classes=parent_path+\"\\\\166_classes_list.xls\" this_batch_imgs_path=parent_path+\"\\\\本批商品图例\" def get_labels(input_path): data=xlrd.open_workbook(input_path) table=data.sheets()[0] labels=table.col_values(0) return labels def get_chinese(input_path,pointlabel): #excel", "weight=table.cell(row,2).value package=table.cell(row,3).value return product,taste,weight,package def find_imgs_and_write_word(labels,parent_path): file=docx.Document() for i in range(len(labels)): img=img_path+\"\\\\\"+labels[i]+\".jpg\" product,taste,weight,package=get_chinese(refer_166classes,labels[i])", "def get_labels(input_path): data=xlrd.open_workbook(input_path) table=data.sheets()[0] labels=table.col_values(0) return labels def get_chinese(input_path,pointlabel): #excel data=xlrd.open_workbook(input_path) table=data.sheets()[0] 
labels=table.col_values(4)", "100), u\"\\\"\"+labels[i]+\"\\\"\", font=setFont, fill=fillColor) draw.text((10, height - 50), u\"\\\"\" + product+taste+weight+package + \"\\\"\",", "i in range(len(labels)): background = Image.open(\"C:\\\\Users\\\\Administrator\\\\Desktop\\\\data_processing_carriechen\\\\count_all_annotations\\\\pure_white_background.jpg\") img = img_path + \"\\\\\" + labels[i]", "#else row=labels.index(pointlabel) product=table.cell(row,0).value taste = table.cell(row, 1).value weight=table.cell(row,2).value package=table.cell(row,3).value return product,taste,weight,package def find_imgs_and_write_word(labels,parent_path):", ": 5/24/2018 # @Author : CarrieChen # @File : get_you_need_label_img.py # @Software: ZJ_AI", "get_labels(input_path): data=xlrd.open_workbook(input_path) table=data.sheets()[0] labels=table.col_values(0) return labels def get_chinese(input_path,pointlabel): #excel data=xlrd.open_workbook(input_path) table=data.sheets()[0] labels=table.col_values(4) if", "parent_path): io_utils.mkdir(this_batch_imgs_path) for i in range(len(labels)): background = Image.open(\"C:\\\\Users\\\\Administrator\\\\Desktop\\\\data_processing_carriechen\\\\count_all_annotations\\\\pure_white_background.jpg\") img = img_path +", "file.save(parent_path+\"\\\\\"+\"本批商品图例.doc\") #这是生成的word文档的名字 def find_imgs_and_save_as_imgs(labels, parent_path): io_utils.mkdir(this_batch_imgs_path) for i in range(len(labels)): background = Image.open(\"C:\\\\Users\\\\Administrator\\\\Desktop\\\\data_processing_carriechen\\\\count_all_annotations\\\\pure_white_background.jpg\")", "import ImageFont #some paths parent_path=\"C:\\\\Users\\\\Administrator\\\\Desktop\\\\data_processing_carriechen\\\\count_all_annotations\" excel_path=parent_path+\"\\\\本批商品列表.xls\" img_path=parent_path+\"\\\\pic+lab166\" refer_166classes=parent_path+\"\\\\166_classes_list.xls\" this_batch_imgs_path=parent_path+\"\\\\本批商品图例\" def get_labels(input_path): data=xlrd.open_workbook(input_path) table=data.sheets()[0]", "and put the imgs into a word. import xlrd import docx from PIL", "ImageFont.truetype('C:\\Windows\\Fonts\\\\simfang.ttf', 30) fillColor = \"black\" draw.text((10, height - 100), u\"\\\"\"+labels[i]+\"\\\"\", font=setFont, fill=fillColor) draw.text((10,", "from PIL import Image import os import xlwt import io_utils from PIL import", "labels from excel and find according imgs and put the imgs into a", "find_imgs_and_save_as_imgs(labels, parent_path): io_utils.mkdir(this_batch_imgs_path) for i in range(len(labels)): background = Image.open(\"C:\\\\Users\\\\Administrator\\\\Desktop\\\\data_processing_carriechen\\\\count_all_annotations\\\\pure_white_background.jpg\") img = img_path", "according imgs and put the imgs into a word. 
import xlrd import docx", "import os import xlwt import io_utils from PIL import ImageDraw from PIL import", "data=xlrd.open_workbook(input_path) table=data.sheets()[0] labels=table.col_values(0) return labels def get_chinese(input_path,pointlabel): #excel data=xlrd.open_workbook(input_path) table=data.sheets()[0] labels=table.col_values(4) if pointlabel", "= \"black\" draw.text((10, height - 100), u\"\\\"\"+labels[i]+\"\\\"\", font=setFont, fill=fillColor) draw.text((10, height - 50),", "30) fillColor = \"black\" draw.text((10, height - 100), u\"\\\"\"+labels[i]+\"\\\"\", font=setFont, fill=fillColor) draw.text((10, height", "weight, package = get_chinese(refer_166classes, labels[i]) img=Image.open(img) background.paste(img,[100,50]) draw = ImageDraw.Draw(background) width, height =", "u\"\\\"\"+labels[i]+\"\\\"\", font=setFont, fill=fillColor) draw.text((10, height - 50), u\"\\\"\" + product+taste+weight+package + \"\\\"\", font=setFont,", "find according imgs and put the imgs into a word. import xlrd import", "package = get_chinese(refer_166classes, labels[i]) img=Image.open(img) background.paste(img,[100,50]) draw = ImageDraw.Draw(background) width, height = background.size", "labels[i] + \".jpg\" product, taste, weight, package = get_chinese(refer_166classes, labels[i]) img=Image.open(img) background.paste(img,[100,50]) draw", "# -*- coding: utf-8 -*- # @Time : 5/24/2018 # @Author : CarrieChen", "setFont = ImageFont.truetype('C:\\Windows\\Fonts\\\\simfang.ttf', 30) fillColor = \"black\" draw.text((10, height - 100), u\"\\\"\"+labels[i]+\"\\\"\", font=setFont,", "import xlwt import io_utils from PIL import ImageDraw from PIL import ImageFont #some", "utf-8 -*- # @Time : 5/24/2018 # @Author : CarrieChen # @File :" ]
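# --- Illustrative sketch (not part of the original script) ---------------------
# The annotation step in find_imgs_and_save_as_imgs depends on machine-specific
# Windows paths, a local Excel sheet, and a Windows TrueType font. The snippet
# below is a minimal, self-contained sketch of the same paste-and-caption idea;
# the file names ("label-001.jpg", "label-001_captioned.jpg") and the 600x800
# white background are hypothetical stand-ins, and Pillow's default bitmap font
# replaces simfang.ttf.
from PIL import Image, ImageDraw


def caption_on_background(product_img_path, out_path, caption):
    background = Image.new("RGB", (600, 800), "white")   # stand-in for pure_white_background.jpg
    product = Image.open(product_img_path).convert("RGB")
    background.paste(product, (100, 50))                 # same paste offset as the script above
    draw = ImageDraw.Draw(background)
    width, height = background.size
    # Default font; swap in ImageFont.truetype(...) when CJK glyphs are needed.
    draw.text((10, height - 50), caption, fill="black")
    background.save(out_path)

# Example with a hypothetical file:
# caption_on_background("label-001.jpg", "label-001_captioned.jpg", '"label-001"')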
#!/usr/bin/env python
#
# Author: <NAME>.
# Created: Dec 11, 2014.
"""Some utility functions to handle images."""

import math

import numpy as np
import PIL.Image
from PIL.Image import ROTATE_180, ROTATE_90, ROTATE_270, FLIP_TOP_BOTTOM, FLIP_LEFT_RIGHT
import skimage.transform


def imcast(img, dtype, color_space="default"):
    """Cast the input image to a given data type.

    Parameters
    ----------
    img: ndarray
        The input image.
    dtype: np.dtype
        The type that the output image is to be cast into.
    color_space: string, optional
        The color space of the input image, which affects the casting operation.

    Returns
    -------
    The output image that is cast into `dtype`.

    Notes
    -----
    * For `color_space=="default"`, we perform a linear scaling with the
      following range conventions:

        * `np.uint8`: `[0, 255]`;
        * `np.uint16`: `[0, 65535]`;
        * `np.float32` and `np.float64`: `[0.0, 1.0]`.

      For example, if the input `img` is of `np.uint8` type and the expected
      `dtype` is `np.float32`, then the output will be
      `np.asarray(img / 255., np.float32)`.

    * For `color_space=="CIE-L*a*b*"`, the "normal" value ranges are
      `0 <= L <= 100, -127 <= a, b <= 127`, and we perform the following cast:

        * `np.uint8`: `L <- L * 255 / 100, a <- a + 128, b <- b + 128`;
        * `np.uint16`: currently not supported;
        * `np.float32` and `np.float64`: left as is.
    """
    if img.dtype == dtype:
        return img
    if color_space == "default":
        if dtype == np.uint8:
            if img.dtype == np.uint16:
                return np.asarray(img / 257, np.uint8)
            elif img.dtype == np.float32 or img.dtype == np.float64:
                return np.asarray(img * 255., np.uint8)
        elif dtype == np.uint16:
            if img.dtype == np.uint8:
                return np.asarray(img, np.uint16) * 257
            elif img.dtype == np.float32 or img.dtype == np.float64:
                return np.asarray(img * 65535., np.uint16)
        elif dtype == np.float32 or dtype == np.float64:
            if img.dtype == np.uint8:
                return np.asarray(img, dtype) / 255.
            elif img.dtype == np.uint16:
                return np.asarray(img, dtype) / 65535.
            elif img.dtype == np.float32 or img.dtype == np.float64:
                return np.asarray(img, dtype)
    elif color_space == "CIE-L*a*b*":
        if dtype == np.uint8:
            if img.dtype == np.float32 or img.dtype == np.float64:
                dst = np.empty(img.shape, np.uint8)
                dst[:,:,0] = img[:,:,0] * 255. / 100.
                dst[:,:,1] = img[:,:,1] + 128.
                dst[:,:,2] = img[:,:,2] + 128.
                return dst
        elif dtype == np.float32 or dtype == np.float64:
            if img.dtype == np.uint8:
                dst = np.empty(img.shape, dtype)
                dst[:,:,0] = np.asarray(img[:,:,0], dtype) / 255. * 100.
                dst[:,:,1] = np.asarray(img[:,:,1], dtype) - 128.
                dst[:,:,2] = np.asarray(img[:,:,2], dtype) - 128.
                return dst
    raise Exception(
        "Unexpected conversion from '%s' to '%s' with '%s' color space" %
        (img.dtype, dtype, color_space))


def imread(filename, dtype=np.uint8, color_space="default"):
    """Read the image followed by an :py:func:`imcast`."""
    img = PIL.Image.open(filename)
    if img.mode != "RGB":
        img = img.convert("RGB")
    if hasattr(img, "_getexif"):
        try:
            exif = img._getexif() or {}
        except IOError:
            exif = {}
        orientation = exif.get(0x0112)
        if orientation:
            # see http://park2.wakwak.com/~tsuruzoh/Computer/Digicams/exif-e.html
            # for explanation of the magical constants
            # or see http://jpegclub.org/exif_orientation.html for a nice visual explanation
            # also, rotations are counter-clockwise in PIL
            orientation = int(orientation)
            rotation = [None, None, ROTATE_180, None, ROTATE_270, ROTATE_270, ROTATE_90, ROTATE_90]
            flip = [None, FLIP_LEFT_RIGHT, None, FLIP_TOP_BOTTOM, FLIP_LEFT_RIGHT, None, FLIP_LEFT_RIGHT, None]
            orientation0 = orientation - 1  # it's 1-indexed per the EXIF spec
            if 0 <= orientation0 < len(rotation):
                if rotation[orientation0] is not None:
                    img = img.transpose(rotation[orientation0])
                if flip[orientation0] is not None:
                    img = img.transpose(flip[orientation0])
    return imcast(np.array(img), dtype, color_space)


def imwrite(filename, img, dtype=np.uint8, color_space="default"):
    """Perform an :py:func:`imcast` before writing to the output file."""
    import scipy.misc
    return scipy.misc.imsave(filename, imcast(img, dtype, color_space))


def imresize(img, size):
    """Resize the input image.

    Parameters
    ----------
    img: ndarray
        The input image to be resized.
    size: a scalar for `scale` or a 2-tuple for `(num_rows, num_cols)`
        One of the `num_rows` or `num_cols` can be -1, which will be inferred
        such that the output image has the same aspect ratio as the input.

    Returns
    -------
    The resized image.
    """
    if hasattr(size, "__len__"):
        num_rows, num_cols = size
        assert (num_rows > 0) or (num_cols > 0)
        if num_rows < 0:
            num_rows = num_cols * img.shape[0] / img.shape[1]
        if num_cols < 0:
            num_cols = num_rows * img.shape[1] / img.shape[0]
    else:
        num_rows = int(round(img.shape[0] * size))
        num_cols = int(round(img.shape[1] * size))
    return skimage.transform.resize(img, (num_rows, num_cols))


def create_icon_mosaic(icons, icon_shape=None, border_size=1, border_color=None,
                       empty_color=None, mosaic_shape=None, mosaic_dtype=np.float):
    """Create a mosaic of image icons.

    Parameters
    ----------
    icons: a list of `ndarray`s
        A list of icons to be put together for mosaic. Currently we require
        all icons to be multi-channel images of the same size.
    icon_shape: 3-tuple, optional
        The shape of icons in the output mosaic as `(num_rows, num_cols, num_channels)`.
        If not specified, use the shape of the first image in `icons`.
    border_size: int, optional
        The size of border.
    border_color: 3-tuple, optional
        The color of border, black if not specified.
    empty_color: 3-tuple, optional
        The color for empty cells, black if not specified.
    mosaic_shape: 2-tuple, optional
        The shape of output mosaic as `(num_icons_per_row, num_icons_per_col)`.
        If not specified, try to make a square mosaic according to the number of icons.
    mosaic_dtype: dtype
        The data type of the output mosaic.

    Returns
    -------
    The created mosaic image.
    """
    # Set default parameters.
    num_icons = len(icons)
    assert num_icons > 0
    if icon_shape is None:
        icon_shape = icons[0].shape
    assert len(icon_shape) == 3
    num_channels = icon_shape[2]
    if border_color is None:
        border_color = np.zeros(num_channels)
    if empty_color is None:
        empty_color = np.zeros(num_channels)
    if mosaic_shape is None:
        num_cols = int(math.ceil(math.sqrt(num_icons)))
        num_rows = int(math.ceil(float(num_icons) / num_cols))
        mosaic_shape = (num_rows, num_cols)
    mosaic_image_shape = (
        mosaic_shape[0] * icon_shape[0] + (mosaic_shape[0]-1) * border_size,
        mosaic_shape[1] * icon_shape[1] + (mosaic_shape[1]-1) * border_size,
        icon_shape[2])
    # Create mosaic image and fill with border color.
    mosaic_image = np.empty(mosaic_image_shape, dtype=mosaic_dtype)
    for c in xrange(mosaic_image.shape[2]):
        mosaic_image[:,:,c] = border_color[c]
    # Fill in the input icons.
    for idx in xrange(num_icons):
        i = idx / mosaic_shape[1]
        j = idx % mosaic_shape[1]
        iStart = i * (icon_shape[0] + border_size)
        jStart = j * (icon_shape[1] + border_size)
        mosaic_image[iStart:iStart+icon_shape[0], jStart:jStart+icon_shape[1],:] = icons[idx]
    # Fill the empty icons with empty colors.
    for idx in xrange(num_icons, mosaic_shape[0]*mosaic_shape[1]):
        i = idx / mosaic_shape[1]
        j = idx % mosaic_shape[1]
        iStart = i * (icon_shape[0] + border_size)
        jStart = j * (icon_shape[1] + border_size)
        for c in xrange(mosaic_image.shape[2]):
            mosaic_image[iStart:iStart+icon_shape[0], jStart:jStart+icon_shape[1],c] = empty_color[c]
    return mosaic_image


def image_size_from_file(filename):
    """Read the image size from a file.

    This function only loads the image header (rather than the whole
    rasterized data) in order to determine its dimension.

    Parameters
    ----------
    filename: string
        The input image file.

    Returns
    -------
    The 2-tuple for image size `(num_rows, num_cols)`.
    """
    with PIL.Image.open(filename) as img:
        width, height = img.size
    return height, width  # (num_rows, num_cols)
\"\"\" if img.dtype ==", "np.float32 or img.dtype == np.float64: dst = np.empty(img.shape, np.uint8) dst[:,:,0] = img[:,:,0] *", "None, ROTATE_270, ROTATE_270, ROTATE_90, ROTATE_90] flip = [None, FLIP_LEFT_RIGHT, None, FLIP_TOP_BOTTOM, FLIP_LEFT_RIGHT, None,", "img.shape[0] / img.shape[1] if num_cols < 0: num_cols = num_rows * img.shape[1] /", "the input image to a given data type. Parameters ---------- img: ndarray The", "idx in xrange(num_icons): i = idx / mosaic_shape[1] j = idx % mosaic_shape[1]", "If not specified, use the shape of first image in `icons`. border_size: int,", "np.float64: if img.dtype == np.uint8: return np.asarray(img, dtype) / 255. elif img.dtype ==", "np.empty(img.shape, dtype) dst[:,:,0] = np.asarray(img[:,:,0], dtype) / 255. * 100. dst[:,:,1] = np.asarray(img[:,:,1],", "dtype) - 128. return dst raise Exception( \"Unexpected conversion from '%s' to '%s'", "the image header (rather than the whole rasterized data) in order to determine", "border_color=None, empty_color=None, mosaic_shape=None, mosaic_dtype=np.float): \"\"\"Create a mosaic of image icons. Parameters ---------- icons:", "- 128. return dst raise Exception( \"Unexpected conversion from '%s' to '%s' with", "* 65535., np.uint16) elif dtype == np.float32 or dtype == np.float64: if img.dtype", "scaling with following range conventions: * `np.uint8`: `[0, 255]`; * `np.uint16`: `[0, 65535]`;", "Parameters ---------- img: ndarray The input image. dtype: np.dtype The type that output", "img if color_space == \"default\": if dtype == np.uint8: if img.dtype == np.uint16:", "dtype == np.float32 or dtype == np.float64: if img.dtype == np.uint8: return np.asarray(img,", "image. Parameters ---------- img: ndarray The input image to be resized. size: a", "(img.dtype, dtype, color_space)) def imread(filename, dtype=np.uint8, color_space=\"default\"): \"\"\"Read the image followed by an", "orientation0 < len(rotation): if rotation[orientation0] is not None: img = img.transpose(rotation[orientation0]) if flip[orientation0]", "be multi-channel images of the same size. icon_shape: 3-tuple, optional The shape of", "size from a file. This function only loads but the image header (rather", "icon_shape[1] + (mosaic_shape[1]-1) * border_size, icon_shape[2]) # Create mosaic image and fill with", "np.uint16) * 257 elif img.dtype == np.float32 or img.dtype == np.float64: return np.asarray(img", "(mosaic_shape[0]-1) * border_size, mosaic_shape[1] * icon_shape[1] + (mosaic_shape[1]-1) * border_size, icon_shape[2]) # Create", "\"\"\" # Set default parameters. num_icons = len(icons) assert num_icons > 0 if", "color. mosaic_image = np.empty(mosaic_image_shape, dtype=mosaic_dtype) for c in xrange(mosaic_image.shape[2]): mosaic_image[:,:,c] = border_color[c] #", "color space of the input image, which affects the casting operation. Returns -------", "mosaic. Returns ------- The created mosaic image. \"\"\" # Set default parameters. num_icons", "mosaic_image_shape = ( mosaic_shape[0] * icon_shape[0] + (mosaic_shape[0]-1) * border_size, mosaic_shape[1] * icon_shape[1]", "num_cols = size assert (num_rows > 0) or (num_cols > 0) if num_rows", "\"\"\" if hasattr(size, \"__len__\"): num_rows, num_cols = size assert (num_rows > 0) or", "numpy as np import PIL.Image from PIL.Image import ROTATE_180, ROTATE_90, ROTATE_270, FLIP_TOP_BOTTOM, FLIP_LEFT_RIGHT", "dtype == np.uint8: if img.dtype == np.uint16: return np.asarray(img / 257, np.uint8) elif", "Returns ------- The resized image. 
\"\"\" if hasattr(size, \"__len__\"): num_rows, num_cols = size", "mosaic_shape=None, mosaic_dtype=np.float): \"\"\"Create a mosaic of image icons. Parameters ---------- icons: a list", "\"\"\"Create a mosaic of image icons. Parameters ---------- icons: a list of `ndarray`s", "icon_shape is None: icon_shape = icons[0].shape assert len(icon_shape) == 3 num_channels = icon_shape[2]", "\"\"\"Read the image size from a file. This function only loads but the", "- 128. dst[:,:,2] = np.asarray(img[:,:,2], dtype) - 128. return dst raise Exception( \"Unexpected", "a square mosaic according to number of icons. mosaic_dtype: dtype The data type", "128. return dst elif dtype == np.float32 or dtype == np.float64: if img.dtype", "of icons to be put together for mosaic. Currently we require all icons", "mosaic as `(num_rows, num_cols, num_channels)`. If not specified, use the shape of first", "import numpy as np import PIL.Image from PIL.Image import ROTATE_180, ROTATE_90, ROTATE_270, FLIP_TOP_BOTTOM,", "image size `(num_rows, num_cols)`. \"\"\" with PIL.Image.open(filename) as img: width, height = img.size", "cast: * `np.uint8`: `L <- L * 255 / 100, a <- a", "magical constants # or see http://jpegclub.org/exif_orientation.html for a nice visual explanation # also,", "exif.get(0x0112) if orientation: # see http://park2.wakwak.com/~tsuruzoh/Computer/Digicams/exif-e.html # for explanation of the magical constants", "or dtype == np.float64: if img.dtype == np.uint8: return np.asarray(img, dtype) / 255.", "/ 257, np.uint8) elif img.dtype == np.float32 or img.dtype == np.float64: return np.asarray(img", "# or see http://jpegclub.org/exif_orientation.html for a nice visual explanation # also, rotations are", "= np.empty(mosaic_image_shape, dtype=mosaic_dtype) for c in xrange(mosaic_image.shape[2]): mosaic_image[:,:,c] = border_color[c] # Fill in", "in xrange(num_icons, mosaic_shape[0]*mosaic_shape[1]): i = idx / mosaic_shape[1] j = idx % mosaic_shape[1]", "jStart = j * (icon_shape[1] + border_size) for c in xrange(mosaic_image.shape[2]): mosaic_image[iStart:iStart+icon_shape[0], jStart:jStart+icon_shape[1],c]", "+ border_size) jStart = j * (icon_shape[1] + border_size) mosaic_image[iStart:iStart+icon_shape[0], jStart:jStart+icon_shape[1],:] = icons[idx]", "color_space == \"CIE-L*a*b*\": if dtype == np.uint8: if img.dtype == np.float32 or img.dtype", "the shape of first image in `icons`. border_size: int, optional The size of", "img.dtype == np.uint8: return np.asarray(img, dtype) / 255. elif img.dtype == np.uint16: return", "affects the casting operation. Returns ------- The output image that is cast into", "+ (mosaic_shape[0]-1) * border_size, mosaic_shape[1] * icon_shape[1] + (mosaic_shape[1]-1) * border_size, icon_shape[2]) #", "if img.dtype == np.float32 or img.dtype == np.float64: dst = np.empty(img.shape, np.uint8) dst[:,:,0]", "the input. Returns ------- The resized image. \"\"\" if hasattr(size, \"__len__\"): num_rows, num_cols", "if hasattr(size, \"__len__\"): num_rows, num_cols = size assert (num_rows > 0) or (num_cols", "+ 128, b <- b + 128`; * `np.uint16`: currently not supported; *", "<NAME>. # Created: Dec 11, 2014. \"\"\"Some utility functions to handle images.\"\"\" import", "(icon_shape[1] + border_size) mosaic_image[iStart:iStart+icon_shape[0], jStart:jStart+icon_shape[1],:] = icons[idx] # Fill the empty icons with", "is None: icon_shape = icons[0].shape assert len(icon_shape) == 3 num_channels = icon_shape[2] if", "determine its dimension. 
Parameters ---------- filename: string The input image file. Returns -------", "img[:,:,1] + 128. dst[:,:,2] = img[:,:,2] + 128. return dst elif dtype ==", "or img.dtype == np.float64: return np.asarray(img * 255., np.uint8) elif dtype == np.uint16:", "img.dtype == np.float64: return np.asarray(img * 65535., np.uint16) elif dtype == np.float32 or", "Returns ------- The 2-tuple for image size `(num_rows, num_cols)`. \"\"\" with PIL.Image.open(filename) as", "skimage.transform.resize(img, (num_rows, num_cols)) def create_icon_mosaic(icons, icon_shape=None, border_size=1, border_color=None, empty_color=None, mosaic_shape=None, mosaic_dtype=np.float): \"\"\"Create a", "color_space) def imwrite(filename, img, dtype=np.uint8, color_space=\"default\"): \"\"\"Perform an :py:func:`imcast` before writing to the", "== np.float32 or img.dtype == np.float64: dst = np.empty(img.shape, np.uint8) dst[:,:,0] = img[:,:,0]", "string The input image file. Returns ------- The 2-tuple for image size `(num_rows,", "ranges are `0 <= L <= 100, -127 <= a, b <= 127`,", "the same size. icon_shape: 3-tuple, optional The shape of icons in the output", "elif color_space == \"CIE-L*a*b*\": if dtype == np.uint8: if img.dtype == np.float32 or", "== np.uint8: return np.asarray(img, dtype) / 255. elif img.dtype == np.uint16: return np.asarray(img,", "dtype) / 255. * 100. dst[:,:,1] = np.asarray(img[:,:,1], dtype) - 128. dst[:,:,2] =", "has the same aspect ratio as the input. Returns ------- The resized image.", "= np.empty(img.shape, dtype) dst[:,:,0] = np.asarray(img[:,:,0], dtype) / 255. * 100. dst[:,:,1] =", "for `scale` or a 2-tuple for `(num_rows, num_cols)` One of the `num_rows` or", "= border_color[c] # Fill in the input icons. for idx in xrange(num_icons): i", "inferred such that the output image has the same aspect ratio as the", "imcast(img, dtype, color_space=\"default\"): \"\"\"Cast the input image to a given data type. Parameters", "num_cols < 0: num_cols = num_rows * img.shape[1] / img.shape[0] else: num_rows =", "be `np.asarray(img / 255., np.float32)`. * For `color_space==\"CIE-L*a*b*\"`, the \"normal\" value ranges are", "if num_rows < 0: num_rows = num_cols * img.shape[0] / img.shape[1] if num_cols", "`[0.0, 1.0]`. For example, if the input `img` is of `np.uint8` type and", "resized. size: a scalar for `scale` or a 2-tuple for `(num_rows, num_cols)` One", "input. Returns ------- The resized image. \"\"\" if hasattr(size, \"__len__\"): num_rows, num_cols =", "color for empty cells, black if not specified. mosaic_shape: 2-tuple, optional The shape", "== np.float32 or img.dtype == np.float64: return np.asarray(img, dtype) elif color_space == \"CIE-L*a*b*\":", "+ 128. return dst elif dtype == np.float32 or dtype == np.float64: if", "<- L * 255 / 100, a <- a + 128, b <-", "the casting operation. Returns ------- The output image that is cast into `dtype`.", "The created mosaic image. \"\"\" # Set default parameters. num_icons = len(icons) assert", "------- The 2-tuple for image size `(num_rows, num_cols)`. \"\"\" with PIL.Image.open(filename) as img:", "is of `np.uint8` type and the expected `dtype` is `np.float32`, then the output", "dtype) / 65535. elif img.dtype == np.float32 or img.dtype == np.float64: return np.asarray(img,", "np.float32 or img.dtype == np.float64: return np.asarray(img, dtype) elif color_space == \"CIE-L*a*b*\": if", "in the output mosaic as `(num_rows, num_cols, num_channels)`. 
If not specified, use the", "PIL.Image from PIL.Image import ROTATE_180, ROTATE_90, ROTATE_270, FLIP_TOP_BOTTOM, FLIP_LEFT_RIGHT import skimage.transform def imcast(img,", "c in xrange(mosaic_image.shape[2]): mosaic_image[:,:,c] = border_color[c] # Fill in the input icons. for", "\"\"\"Read the image followed by an :py:func:`imcast`.\"\"\" img = PIL.Image.open(filename) if img.mode !=", "its dimension. Parameters ---------- filename: string The input image file. Returns ------- The", "border_size) for c in xrange(mosaic_image.shape[2]): mosaic_image[iStart:iStart+icon_shape[0], jStart:jStart+icon_shape[1],c] = empty_color[c] return mosaic_image def image_size_from_file(filename):", "`np.uint8`: `L <- L * 255 / 100, a <- a + 128,", "if num_cols < 0: num_cols = num_rows * img.shape[1] / img.shape[0] else: num_rows", "type and the expected `dtype` is `np.float32`, then the output will be `np.asarray(img", "or a 2-tuple for `(num_rows, num_cols)` One of the `num_rows` or `num_cols` can", "created mosaic image. \"\"\" # Set default parameters. num_icons = len(icons) assert num_icons", "then the output will be `np.asarray(img / 255., np.float32)`. * For `color_space==\"CIE-L*a*b*\"`, the", "= int(round(img.shape[1] * size)) return skimage.transform.resize(img, (num_rows, num_cols)) def create_icon_mosaic(icons, icon_shape=None, border_size=1, border_color=None,", "mosaic_dtype=np.float): \"\"\"Create a mosaic of image icons. Parameters ---------- icons: a list of", "is not None: img = img.transpose(rotation[orientation0]) if flip[orientation0] is not None: img =", "+ 128. dst[:,:,2] = img[:,:,2] + 128. return dst elif dtype == np.float32", "# also, rotations are counter-clockwise in PIL orientation = int(orientation) rotation = [None,", "from '%s' to '%s' with '%s' color space\" % \\ (img.dtype, dtype, color_space))", "color_space: string, optional The color space of the input image, which affects the", "image icons. Parameters ---------- icons: a list of `ndarray`s A list of icons", "specified. mosaic_shape: 2-tuple, optional The shape of output mosaic as `(num_icons_per_row, num_icons_per_col)`. If", "for image size `(num_rows, num_cols)`. \"\"\" with PIL.Image.open(filename) as img: width, height =", "FLIP_TOP_BOTTOM, FLIP_LEFT_RIGHT, None, FLIP_LEFT_RIGHT, None] orientation0 = orientation - 1 # it's 1-indexed", "/ 255. elif img.dtype == np.uint16: return np.asarray(img, dtype) / 65535. elif img.dtype", "not None: img = img.transpose(flip[orientation0]) return imcast(np.array(img), dtype, color_space) def imwrite(filename, img, dtype=np.uint8,", "the `num_rows` or `num_cols` can be -1, which will be inferred such that", "0) or (num_cols > 0) if num_rows < 0: num_rows = num_cols *", "xrange(num_icons, mosaic_shape[0]*mosaic_shape[1]): i = idx / mosaic_shape[1] j = idx % mosaic_shape[1] iStart", "* (icon_shape[1] + border_size) mosaic_image[iStart:iStart+icon_shape[0], jStart:jStart+icon_shape[1],:] = icons[idx] # Fill the empty icons", "< 0: num_rows = num_cols * img.shape[0] / img.shape[1] if num_cols < 0:", "num_rows = int(round(img.shape[0] * size)) num_cols = int(round(img.shape[1] * size)) return skimage.transform.resize(img, (num_rows,", "`color_space==\"CIE-L*a*b*\"`, the \"normal\" value ranges are `0 <= L <= 100, -127 <=", "elif dtype == np.uint16: if img.dtype == np.uint8: return np.asarray(img, np.uint16) * 257", "np.asarray(img[:,:,2], dtype) - 128. 
return dst raise Exception( \"Unexpected conversion from '%s' to", "ROTATE_90, ROTATE_90] flip = [None, FLIP_LEFT_RIGHT, None, FLIP_TOP_BOTTOM, FLIP_LEFT_RIGHT, None, FLIP_LEFT_RIGHT, None] orientation0", "`np.float32` and `np.float64`: left as is. \"\"\" if img.dtype == dtype: return img", "list of `ndarray`s A list of icons to be put together for mosaic.", "for mosaic. Currently we require all icons to be multi-channel images of the", "None: img = img.transpose(flip[orientation0]) return imcast(np.array(img), dtype, color_space) def imwrite(filename, img, dtype=np.uint8, color_space=\"default\"):", "jStart:jStart+icon_shape[1],:] = icons[idx] # Fill the empty icons with empty colors. for idx", "are counter-clockwise in PIL orientation = int(orientation) rotation = [None, None, ROTATE_180, None,", "image. \"\"\" if hasattr(size, \"__len__\"): num_rows, num_cols = size assert (num_rows > 0)", "type of output mosaic. Returns ------- The created mosaic image. \"\"\" # Set", "np.uint16: return np.asarray(img / 257, np.uint8) elif img.dtype == np.float32 or img.dtype ==", "empty_color = np.zeros(num_channels) if mosaic_shape is None: num_cols = int(math.ceil(math.sqrt(num_icons))) num_rows = int(math.ceil(float(num_icons)", "if img.dtype == np.uint8: return np.asarray(img, np.uint16) * 257 elif img.dtype == np.float32", "it's 1-indexed per the EXIF spec if 0 <= orientation0 < len(rotation): if", "`L <- L * 255 / 100, a <- a + 128, b", "or img.dtype == np.float64: return np.asarray(img, dtype) elif color_space == \"CIE-L*a*b*\": if dtype", "np.uint8: if img.dtype == np.uint16: return np.asarray(img / 257, np.uint8) elif img.dtype ==", "or (num_cols > 0) if num_rows < 0: num_rows = num_cols * img.shape[0]", "img[:,:,0] * 255. / 100. dst[:,:,1] = img[:,:,1] + 128. dst[:,:,2] = img[:,:,2]", "np.float32 or img.dtype == np.float64: return np.asarray(img * 65535., np.uint16) elif dtype ==", "of the input image, which affects the casting operation. Returns ------- The output", "np.asarray(img * 65535., np.uint16) elif dtype == np.float32 or dtype == np.float64: if", "= img[:,:,1] + 128. dst[:,:,2] = img[:,:,2] + 128. return dst elif dtype", "\"\"\"Resize the input image. Parameters ---------- img: ndarray The input image to be", "A list of icons to be put together for mosaic. Currently we require", "dtype The data type of output mosaic. Returns ------- The created mosaic image.", "Exception( \"Unexpected conversion from '%s' to '%s' with '%s' color space\" % \\", "output mosaic as `(num_rows, num_cols, num_channels)`. If not specified, use the shape of", "\"\"\"Cast the input image to a given data type. Parameters ---------- img: ndarray", "'%s' color space\" % \\ (img.dtype, dtype, color_space)) def imread(filename, dtype=np.uint8, color_space=\"default\"): \"\"\"Read", "given data type. Parameters ---------- img: ndarray The input image. dtype: np.dtype The", "as is. \"\"\" if img.dtype == dtype: return img if color_space == \"default\":", "example, if the input `img` is of `np.uint8` type and the expected `dtype`", "input `img` is of `np.uint8` type and the expected `dtype` is `np.float32`, then", "loads but the image header (rather than the whole rasterized data) in order", "`(num_rows, num_cols)`. 
\"\"\" with PIL.Image.open(filename) as img: width, height = img.size return height,", "dtype) elif color_space == \"CIE-L*a*b*\": if dtype == np.uint8: if img.dtype == np.float32", "np.float32 or img.dtype == np.float64: return np.asarray(img * 255., np.uint8) elif dtype ==", "if flip[orientation0] is not None: img = img.transpose(flip[orientation0]) return imcast(np.array(img), dtype, color_space) def", "L * 255 / 100, a <- a + 128, b <- b", "if icon_shape is None: icon_shape = icons[0].shape assert len(icon_shape) == 3 num_channels =", "* (icon_shape[0] + border_size) jStart = j * (icon_shape[1] + border_size) mosaic_image[iStart:iStart+icon_shape[0], jStart:jStart+icon_shape[1],:]", "optional The color space of the input image, which affects the casting operation.", "= img[:,:,0] * 255. / 100. dst[:,:,1] = img[:,:,1] + 128. dst[:,:,2] =", "color space\" % \\ (img.dtype, dtype, color_space)) def imread(filename, dtype=np.uint8, color_space=\"default\"): \"\"\"Read the", "* size)) num_cols = int(round(img.shape[1] * size)) return skimage.transform.resize(img, (num_rows, num_cols)) def create_icon_mosaic(icons,", "Parameters ---------- icons: a list of `ndarray`s A list of icons to be", "the input `img` is of `np.uint8` type and the expected `dtype` is `np.float32`,", "not specified. mosaic_shape: 2-tuple, optional The shape of output mosaic as `(num_icons_per_row, num_icons_per_col)`.", "if border_color is None: border_color = np.zeros(num_channels) if empty_color is None: empty_color =", "j * (icon_shape[1] + border_size) mosaic_image[iStart:iStart+icon_shape[0], jStart:jStart+icon_shape[1],:] = icons[idx] # Fill the empty", "import PIL.Image from PIL.Image import ROTATE_180, ROTATE_90, ROTATE_270, FLIP_TOP_BOTTOM, FLIP_LEFT_RIGHT import skimage.transform def", "raise Exception( \"Unexpected conversion from '%s' to '%s' with '%s' color space\" %", "for explanation of the magical constants # or see http://jpegclub.org/exif_orientation.html for a nice", "3 num_channels = icon_shape[2] if border_color is None: border_color = np.zeros(num_channels) if empty_color", "(icon_shape[0] + border_size) jStart = j * (icon_shape[1] + border_size) mosaic_image[iStart:iStart+icon_shape[0], jStart:jStart+icon_shape[1],:] =", "if orientation: # see http://park2.wakwak.com/~tsuruzoh/Computer/Digicams/exif-e.html # for explanation of the magical constants #", ":py:func:`imcast`.\"\"\" img = PIL.Image.open(filename) if img.mode != \"RGB\": img = img.convert(\"RGB\") if hasattr(img,", "* `np.uint8`: `[0, 255]`; * `np.uint16`: `[0, 65535]`; * `np.float32` and `np.float64`: `[0.0,", "num_channels)`. If not specified, use the shape of first image in `icons`. border_size:", "If not specified, try to make a square mosaic according to number of", "of border, black if not specified. empty_color: 3-tuple, optional The color for empty", "make a square mosaic according to number of icons. mosaic_dtype: dtype The data", "128. dst[:,:,2] = img[:,:,2] + 128. return dst elif dtype == np.float32 or", "iStart = i * (icon_shape[0] + border_size) jStart = j * (icon_shape[1] +", "border_size) jStart = j * (icon_shape[1] + border_size) for c in xrange(mosaic_image.shape[2]): mosaic_image[iStart:iStart+icon_shape[0],", "icons. 
def create_icon_mosaic(icons, icon_shape=None, border_size=1,
                       border_color=None, empty_color=None, mosaic_shape=None,
                       mosaic_dtype=np.float):
    """Create a mosaic of image icons.

    Parameters
    ----------
    icons: a list of `ndarray`s
        A list of icons to be put together into a mosaic. Currently we require
        all icons to be multi-channel images of the same size.
    icon_shape: 3-tuple, optional
        The shape of icons in the output mosaic as `(num_rows, num_cols, num_channels)`.
        If not specified, use the shape of the first image in `icons`.
    border_size: int, optional
        The size of the border.
    border_color: 3-tuple, optional
        The color of the border, black if not specified.
    empty_color: 3-tuple, optional
        The color for empty cells, black if not specified.
    mosaic_shape: 2-tuple, optional
        The shape of the output mosaic as `(num_icons_per_row, num_icons_per_col)`.
        If not specified, try to make a square mosaic according to the number of icons.
    mosaic_dtype: dtype
        The data type of the output mosaic.

    Returns
    -------
    The created mosaic image.
    """
    # Set default parameters.
    num_icons = len(icons)
    assert num_icons > 0
    if icon_shape is None:
        icon_shape = icons[0].shape
    assert len(icon_shape) == 3
    num_channels = icon_shape[2]
    if border_color is None:
        border_color = np.zeros(num_channels)
    if empty_color is None:
        empty_color = np.zeros(num_channels)
    if mosaic_shape is None:
        num_cols = int(math.ceil(math.sqrt(num_icons)))
        num_rows = int(math.ceil(float(num_icons) / num_cols))
        mosaic_shape = (num_rows, num_cols)
    mosaic_image_shape = (
        mosaic_shape[0] * icon_shape[0] + (mosaic_shape[0]-1) * border_size,
        mosaic_shape[1] * icon_shape[1] + (mosaic_shape[1]-1) * border_size,
        icon_shape[2])
    # Create mosaic image and fill with border color.
    mosaic_image = np.empty(mosaic_image_shape, dtype=mosaic_dtype)
    for c in xrange(mosaic_image.shape[2]):
        mosaic_image[:,:,c] = border_color[c]
    # Fill in the input icons.
    for idx in xrange(num_icons):
        i = idx / mosaic_shape[1]
        j = idx % mosaic_shape[1]
        iStart = i * (icon_shape[0] + border_size)
        jStart = j * (icon_shape[1] + border_size)
        mosaic_image[iStart:iStart+icon_shape[0], jStart:jStart+icon_shape[1],:] = icons[idx]
    # Fill the empty icons with empty colors.
    for idx in xrange(num_icons, mosaic_shape[0]*mosaic_shape[1]):
        i = idx / mosaic_shape[1]
        j = idx % mosaic_shape[1]
        iStart = i * (icon_shape[0] + border_size)
        jStart = j * (icon_shape[1] + border_size)
        for c in xrange(mosaic_image.shape[2]):
            mosaic_image[iStart:iStart+icon_shape[0], jStart:jStart+icon_shape[1],c] = empty_color[c]
    return mosaic_image
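# --- Illustrative usage sketch (editor's addition, not part of the original
# module): tiling four 8x8 RGB icons into a 2x2 mosaic with 1-pixel borders.
# This assumes the Python 2-era environment the module targets (`xrange`,
# integer division and `np.float` inside `create_icon_mosaic`).
def _create_icon_mosaic_sketch():
    icons = [np.ones((8, 8, 3)) * (i / 4.) for i in range(4)]
    mosaic = create_icon_mosaic(icons, border_size=1)
    # 2 rows x 2 cols of 8x8 icons plus one 1-pixel border in each direction.
    assert mosaic.shape == (17, 17, 3)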
def image_size_from_file(filename):
    """Read the image size from a file.

    This function only loads the image header (rather than the whole
    rasterized data) in order to determine its dimensions.

    Parameters
    ----------
    filename: string
        The input image file.

    Returns
    -------
    The 2-tuple for image size `(num_rows, num_cols)`.
    """
    with PIL.Image.open(filename) as img:
        width, height = img.size
    return height, width
elif img.dtype == np.uint16: return np.asarray(img, dtype) / 65535. elif" ]
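The list of fragments ending on the line above comes from an image-utility module whose imcast docstring states explicit range conventions for the "default" color space (np.uint8 in [0, 255], np.uint16 in [0, 65535], np.float32/np.float64 in [0.0, 1.0]). As a reading aid, here is a minimal sketch that reconstructs only that default branch from the quoted fragments; the name imcast_default, the exact control flow, and the final ValueError are assumptions for illustration, not the module's verbatim source.

import numpy as np

def imcast_default(img, dtype):
    # Sketch of the "default" colour-space cast described in the fragments:
    # np.uint8 -> [0, 255], np.uint16 -> [0, 65535], float32/float64 -> [0.0, 1.0].
    if img.dtype == dtype:
        return img
    if dtype == np.uint8:
        if img.dtype == np.uint16:
            return np.asarray(img / 257, np.uint8)        # 65535 / 255 == 257
        if img.dtype == np.float32 or img.dtype == np.float64:
            return np.asarray(img * 255., np.uint8)
    elif dtype == np.uint16:
        if img.dtype == np.uint8:
            return np.asarray(img, np.uint16) * 257
        if img.dtype == np.float32 or img.dtype == np.float64:
            return np.asarray(img * 65535., np.uint16)
    elif dtype == np.float32 or dtype == np.float64:
        if img.dtype == np.uint8:
            return np.asarray(img, dtype) / 255.
        if img.dtype == np.uint16:
            return np.asarray(img, dtype) / 65535.
        return np.asarray(img, dtype)
    raise ValueError("unsupported cast: %s -> %s" % (img.dtype, dtype))

print(imcast_default(np.array([[0, 255]], np.uint8), np.float32))   # [[0. 1.]]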
[ "0.05 if minimum_pbs is None: minimum_pbs = this_pbs elif len(minimum_pbs) > len(this_pbs): minimum_pbs", "'__main__': # parameters file_name = 'default.txt' epoch_num = 1000 max_trial = 5000 gamma", "j in range(y): Q[(i, j)] = {_:np.random.normal() for _ in COMMAND} #Q[(i, j)]", "for this_line in ret: print(''.join(this_line)) if __name__ == '__main__': # parameters file_name =", "for _ in COMMAND} #Q[(i, j)] = {_:0.0 for _ in COMMAND} #", "range(max_trial): # get current current_x, current_y = sim.get_current() # select_command tmp_Q = Q[(current_x,", "next_y)][next_max_command] tmp_Q[command] += alpha * (reward + gamma * next_value - current_value) #", "j)] = {_:0.0 for _ in COMMAND} # main minimum_pbs = None for", "current_value = tmp_Q[pb.action] next_Q = Q[pb.next_state] next_max_command = get_max_command(next_Q) next_value = next_Q[next_max_command] tmp_Q[pb.action]", "in range(max_trial): # get current current_x, current_y = sim.get_current() # select_command tmp_Q =", "k,v in target_dict.items()])[1] def simplify(command): return command[0] def print_Q(Q, x, y): ret =", "0.5 # make simulater sim = Simulater(file_name) # initialize Q value x, y", "is None: minimum_pbs = this_pbs elif len(minimum_pbs) > len(this_pbs): minimum_pbs = this_pbs print(epsilon)", "in range(epoch_num): sim.reset() this_pbs = PlayBacks() for i in range(max_trial): # get current", "x, y): ret = [] for i in range(y): ret.append(['0' for _ in", "i in range(y): ret.append(['0' for _ in range(x)]) for k in Q: ret[k[1]][k[0]]", "for j in range(y): Q[(i, j)] = {_:np.random.normal() for _ in COMMAND} #Q[(i,", "alpha * (reward + gamma * next_value - current_value) # play back this_pbs.append(PlayBack((current_x,", "command = get_max_command(tmp_Q) if np.random.uniform() > epsilon else np.random.choice(COMMAND) current_value = tmp_Q[command] #", "current_value = tmp_Q[command] # reward reward = sim(command) # update next_x, next_y =", "x, y = sim.map_size() Q = {} for i in range(x): for j", "print_Q(Q, x, y): ret = [] for i in range(y): ret.append(['0' for _", "ret.append(['0' for _ in range(x)]) for k in Q: ret[k[1]][k[0]] = simplify(get_max_command(Q[k])) for", "next_Q = Q[pb.next_state] next_max_command = get_max_command(next_Q) next_value = next_Q[next_max_command] tmp_Q[pb.action] += alpha *", "-*- import os import sys import numpy as np from simulater import Simulater", "(reward + gamma * next_value - current_value) # play back this_pbs.append(PlayBack((current_x, current_y), command,", "file_name = 'default.txt' epoch_num = 1000 max_trial = 5000 gamma = 0.1 alpha", "tmp_Q[command] # reward reward = sim(command) # update next_x, next_y = sim.get_current() next_max_command", "this_pbs.append(PlayBack((current_x, current_y), command, (next_x, next_y), reward)) # end check if sim.end_episode(): print('find goal')", "_ in COMMAND} # main minimum_pbs = None for epoch in range(epoch_num): sim.reset()", "if epsilon < 0.05: epsilon = 0.05 if minimum_pbs is None: minimum_pbs =", "minimum_pbs is not None: for pb in minimum_pbs: tmp_Q = Q[pb.state] current_value =", "* next_value - current_value) # play back this_pbs.append(PlayBack((current_x, current_y), command, (next_x, next_y), reward))", "for epoch in range(epoch_num): sim.reset() this_pbs = PlayBacks() for i in range(max_trial): #", "*= 0.95 if epsilon < 0.05: epsilon = 0.05 if minimum_pbs is None:", "# end check if sim.end_episode(): print('find goal') epsilon *= 0.95 if epsilon <", "> len(this_pbs): minimum_pbs = this_pbs print(epsilon) break # update with 
minimum_pbs if minimum_pbs", "return max([(v,k) for k,v in target_dict.items()])[1] def simplify(command): return command[0] def print_Q(Q, x,", "alpha * (pb.reward + gamma * next_value - current_value) sim.printing() print('---') print_Q(Q, x,", "next_y = sim.get_current() next_max_command = get_max_command(Q[(next_x, next_y)]) next_value = Q[(next_x, next_y)][next_max_command] tmp_Q[command] +=", "len(this_pbs): minimum_pbs = this_pbs print(epsilon) break # update with minimum_pbs if minimum_pbs is", "# update next_x, next_y = sim.get_current() next_max_command = get_max_command(Q[(next_x, next_y)]) next_value = Q[(next_x,", "epoch_num = 1000 max_trial = 5000 gamma = 0.1 alpha = 0.1 epsilon", "max([(v,k) for k,v in target_dict.items()])[1] def simplify(command): return command[0] def print_Q(Q, x, y):", "tmp_Q[pb.action] next_Q = Q[pb.next_state] next_max_command = get_max_command(next_Q) next_value = next_Q[next_max_command] tmp_Q[pb.action] += alpha", "from simulater import Simulater from play_back import PlayBack, PlayBacks COMMAND = ['UP', 'DOWN',", "= Q[(next_x, next_y)][next_max_command] tmp_Q[command] += alpha * (reward + gamma * next_value -", "Simulater(file_name) # initialize Q value x, y = sim.map_size() Q = {} for", "# get current current_x, current_y = sim.get_current() # select_command tmp_Q = Q[(current_x, current_y)]", "Q = {} for i in range(x): for j in range(y): Q[(i, j)]", "as np from simulater import Simulater from play_back import PlayBack, PlayBacks COMMAND =", "j)] = {_:np.random.normal() for _ in COMMAND} #Q[(i, j)] = {_:0.0 for _", "current_y = sim.get_current() # select_command tmp_Q = Q[(current_x, current_y)] command = get_max_command(tmp_Q) if", "[] for i in range(y): ret.append(['0' for _ in range(x)]) for k in", "elif len(minimum_pbs) > len(this_pbs): minimum_pbs = this_pbs print(epsilon) break # update with minimum_pbs", "range(x)]) for k in Q: ret[k[1]][k[0]] = simplify(get_max_command(Q[k])) for this_line in ret: print(''.join(this_line))", "make simulater sim = Simulater(file_name) # initialize Q value x, y = sim.map_size()", "# update with minimum_pbs if minimum_pbs is not None: for pb in minimum_pbs:", "COMMAND} # main minimum_pbs = None for epoch in range(epoch_num): sim.reset() this_pbs =", "range(x): for j in range(y): Q[(i, j)] = {_:np.random.normal() for _ in COMMAND}", "minimum_pbs: tmp_Q = Q[pb.state] current_value = tmp_Q[pb.action] next_Q = Q[pb.next_state] next_max_command = get_max_command(next_Q)", "target_dict.items()])[1] def simplify(command): return command[0] def print_Q(Q, x, y): ret = [] for", "= {} for i in range(x): for j in range(y): Q[(i, j)] =", "epsilon = 0.5 # make simulater sim = Simulater(file_name) # initialize Q value", "(pb.reward + gamma * next_value - current_value) sim.printing() print('---') print_Q(Q, x, y) print('---')", "= sim.map_size() Q = {} for i in range(x): for j in range(y):", "next_value = Q[(next_x, next_y)][next_max_command] tmp_Q[command] += alpha * (reward + gamma * next_value", "_ in range(x)]) for k in Q: ret[k[1]][k[0]] = simplify(get_max_command(Q[k])) for this_line in", "if sim.end_episode(): print('find goal') epsilon *= 0.95 if epsilon < 0.05: epsilon =", "ret[k[1]][k[0]] = simplify(get_max_command(Q[k])) for this_line in ret: print(''.join(this_line)) if __name__ == '__main__': #", "for pb in minimum_pbs: tmp_Q = Q[pb.state] current_value = tmp_Q[pb.action] next_Q = Q[pb.next_state]", "+= alpha * (pb.reward + gamma * next_value - current_value) sim.printing() print('---') print_Q(Q,", "import 
Simulater from play_back import PlayBack, PlayBacks COMMAND = ['UP', 'DOWN', 'LEFT', 'RIGHT']", "# initialize Q value x, y = sim.map_size() Q = {} for i", "= tmp_Q[command] # reward reward = sim(command) # update next_x, next_y = sim.get_current()", "this_pbs elif len(minimum_pbs) > len(this_pbs): minimum_pbs = this_pbs print(epsilon) break # update with", "PlayBacks() for i in range(max_trial): # get current current_x, current_y = sim.get_current() #", "y = sim.map_size() Q = {} for i in range(x): for j in", "next_max_command = get_max_command(Q[(next_x, next_y)]) next_value = Q[(next_x, next_y)][next_max_command] tmp_Q[command] += alpha * (reward", "+= alpha * (reward + gamma * next_value - current_value) # play back", "= Q[(current_x, current_y)] command = get_max_command(tmp_Q) if np.random.uniform() > epsilon else np.random.choice(COMMAND) current_value", "in ret: print(''.join(this_line)) if __name__ == '__main__': # parameters file_name = 'default.txt' epoch_num", "- current_value) # play back this_pbs.append(PlayBack((current_x, current_y), command, (next_x, next_y), reward)) # end", "next_value = next_Q[next_max_command] tmp_Q[pb.action] += alpha * (pb.reward + gamma * next_value -", "k in Q: ret[k[1]][k[0]] = simplify(get_max_command(Q[k])) for this_line in ret: print(''.join(this_line)) if __name__", "= 0.1 alpha = 0.1 epsilon = 0.5 # make simulater sim =", "if np.random.uniform() > epsilon else np.random.choice(COMMAND) current_value = tmp_Q[command] # reward reward =", "Q[(next_x, next_y)][next_max_command] tmp_Q[command] += alpha * (reward + gamma * next_value - current_value)", "def print_Q(Q, x, y): ret = [] for i in range(y): ret.append(['0' for", "import PlayBack, PlayBacks COMMAND = ['UP', 'DOWN', 'LEFT', 'RIGHT'] def get_max_command(target_dict): return max([(v,k)", "_ in COMMAND} #Q[(i, j)] = {_:0.0 for _ in COMMAND} # main", "= 0.1 epsilon = 0.5 # make simulater sim = Simulater(file_name) # initialize", "epsilon else np.random.choice(COMMAND) current_value = tmp_Q[command] # reward reward = sim(command) # update", "= Q[pb.next_state] next_max_command = get_max_command(next_Q) next_value = next_Q[next_max_command] tmp_Q[pb.action] += alpha * (pb.reward", "back this_pbs.append(PlayBack((current_x, current_y), command, (next_x, next_y), reward)) # end check if sim.end_episode(): print('find", "minimum_pbs = this_pbs print(epsilon) break # update with minimum_pbs if minimum_pbs is not", "= get_max_command(next_Q) next_value = next_Q[next_max_command] tmp_Q[pb.action] += alpha * (pb.reward + gamma *", "{_:np.random.normal() for _ in COMMAND} #Q[(i, j)] = {_:0.0 for _ in COMMAND}", "None: minimum_pbs = this_pbs elif len(minimum_pbs) > len(this_pbs): minimum_pbs = this_pbs print(epsilon) break", "in range(y): ret.append(['0' for _ in range(x)]) for k in Q: ret[k[1]][k[0]] =", "in target_dict.items()])[1] def simplify(command): return command[0] def print_Q(Q, x, y): ret = []", "current_y)] command = get_max_command(tmp_Q) if np.random.uniform() > epsilon else np.random.choice(COMMAND) current_value = tmp_Q[command]", "Simulater from play_back import PlayBack, PlayBacks COMMAND = ['UP', 'DOWN', 'LEFT', 'RIGHT'] def", "def get_max_command(target_dict): return max([(v,k) for k,v in target_dict.items()])[1] def simplify(command): return command[0] def", "= Q[pb.state] current_value = tmp_Q[pb.action] next_Q = Q[pb.next_state] next_max_command = get_max_command(next_Q) next_value =", "ret = [] for i in range(y): ret.append(['0' for _ in range(x)]) for", "COMMAND = ['UP', 'DOWN', 
'LEFT', 'RIGHT'] def get_max_command(target_dict): return max([(v,k) for k,v in", "reward reward = sim(command) # update next_x, next_y = sim.get_current() next_max_command = get_max_command(Q[(next_x,", "Q[pb.next_state] next_max_command = get_max_command(next_Q) next_value = next_Q[next_max_command] tmp_Q[pb.action] += alpha * (pb.reward +", "tmp_Q = Q[(current_x, current_y)] command = get_max_command(tmp_Q) if np.random.uniform() > epsilon else np.random.choice(COMMAND)", "for _ in COMMAND} # main minimum_pbs = None for epoch in range(epoch_num):", "current current_x, current_y = sim.get_current() # select_command tmp_Q = Q[(current_x, current_y)] command =", "next_Q[next_max_command] tmp_Q[pb.action] += alpha * (pb.reward + gamma * next_value - current_value) sim.printing()", "sim = Simulater(file_name) # initialize Q value x, y = sim.map_size() Q =", "= 0.05 if minimum_pbs is None: minimum_pbs = this_pbs elif len(minimum_pbs) > len(this_pbs):", "Q[pb.state] current_value = tmp_Q[pb.action] next_Q = Q[pb.next_state] next_max_command = get_max_command(next_Q) next_value = next_Q[next_max_command]", "current_y), command, (next_x, next_y), reward)) # end check if sim.end_episode(): print('find goal') epsilon", "= None for epoch in range(epoch_num): sim.reset() this_pbs = PlayBacks() for i in", "import sys import numpy as np from simulater import Simulater from play_back import", "Q[(current_x, current_y)] command = get_max_command(tmp_Q) if np.random.uniform() > epsilon else np.random.choice(COMMAND) current_value =", "reward = sim(command) # update next_x, next_y = sim.get_current() next_max_command = get_max_command(Q[(next_x, next_y)])", "if __name__ == '__main__': # parameters file_name = 'default.txt' epoch_num = 1000 max_trial", "select_command tmp_Q = Q[(current_x, current_y)] command = get_max_command(tmp_Q) if np.random.uniform() > epsilon else", "= tmp_Q[pb.action] next_Q = Q[pb.next_state] next_max_command = get_max_command(next_Q) next_value = next_Q[next_max_command] tmp_Q[pb.action] +=", "+ gamma * next_value - current_value) # play back this_pbs.append(PlayBack((current_x, current_y), command, (next_x,", "command[0] def print_Q(Q, x, y): ret = [] for i in range(y): ret.append(['0'", "range(epoch_num): sim.reset() this_pbs = PlayBacks() for i in range(max_trial): # get current current_x,", "initialize Q value x, y = sim.map_size() Q = {} for i in", "if minimum_pbs is None: minimum_pbs = this_pbs elif len(minimum_pbs) > len(this_pbs): minimum_pbs =", "__name__ == '__main__': # parameters file_name = 'default.txt' epoch_num = 1000 max_trial =", "value x, y = sim.map_size() Q = {} for i in range(x): for", "= get_max_command(tmp_Q) if np.random.uniform() > epsilon else np.random.choice(COMMAND) current_value = tmp_Q[command] # reward", "PlayBacks COMMAND = ['UP', 'DOWN', 'LEFT', 'RIGHT'] def get_max_command(target_dict): return max([(v,k) for k,v", "Q: ret[k[1]][k[0]] = simplify(get_max_command(Q[k])) for this_line in ret: print(''.join(this_line)) if __name__ == '__main__':", "# -*- coding:utf-8 -*- import os import sys import numpy as np from", "range(y): Q[(i, j)] = {_:np.random.normal() for _ in COMMAND} #Q[(i, j)] = {_:0.0", "next_y)]) next_value = Q[(next_x, next_y)][next_max_command] tmp_Q[command] += alpha * (reward + gamma *", "minimum_pbs = None for epoch in range(epoch_num): sim.reset() this_pbs = PlayBacks() for i", "os import sys import numpy as np from simulater import Simulater from play_back", "sim.get_current() next_max_command = get_max_command(Q[(next_x, 
next_y)]) next_value = Q[(next_x, next_y)][next_max_command] tmp_Q[command] += alpha *", "with minimum_pbs if minimum_pbs is not None: for pb in minimum_pbs: tmp_Q =", "simulater sim = Simulater(file_name) # initialize Q value x, y = sim.map_size() Q", "'LEFT', 'RIGHT'] def get_max_command(target_dict): return max([(v,k) for k,v in target_dict.items()])[1] def simplify(command): return", "# select_command tmp_Q = Q[(current_x, current_y)] command = get_max_command(tmp_Q) if np.random.uniform() > epsilon", "epsilon < 0.05: epsilon = 0.05 if minimum_pbs is None: minimum_pbs = this_pbs", "COMMAND} #Q[(i, j)] = {_:0.0 for _ in COMMAND} # main minimum_pbs =", "'RIGHT'] def get_max_command(target_dict): return max([(v,k) for k,v in target_dict.items()])[1] def simplify(command): return command[0]", "return command[0] def print_Q(Q, x, y): ret = [] for i in range(y):", "is not None: for pb in minimum_pbs: tmp_Q = Q[pb.state] current_value = tmp_Q[pb.action]", "= ['UP', 'DOWN', 'LEFT', 'RIGHT'] def get_max_command(target_dict): return max([(v,k) for k,v in target_dict.items()])[1]", "gamma = 0.1 alpha = 0.1 epsilon = 0.5 # make simulater sim", "tmp_Q[pb.action] += alpha * (pb.reward + gamma * next_value - current_value) sim.printing() print('---')", "for _ in range(x)]) for k in Q: ret[k[1]][k[0]] = simplify(get_max_command(Q[k])) for this_line", "epsilon *= 0.95 if epsilon < 0.05: epsilon = 0.05 if minimum_pbs is", "this_pbs print(epsilon) break # update with minimum_pbs if minimum_pbs is not None: for", "1000 max_trial = 5000 gamma = 0.1 alpha = 0.1 epsilon = 0.5", "= {_:0.0 for _ in COMMAND} # main minimum_pbs = None for epoch", "sim(command) # update next_x, next_y = sim.get_current() next_max_command = get_max_command(Q[(next_x, next_y)]) next_value =", "= next_Q[next_max_command] tmp_Q[pb.action] += alpha * (pb.reward + gamma * next_value - current_value)", "for k,v in target_dict.items()])[1] def simplify(command): return command[0] def print_Q(Q, x, y): ret", "# make simulater sim = Simulater(file_name) # initialize Q value x, y =", "next_x, next_y = sim.get_current() next_max_command = get_max_command(Q[(next_x, next_y)]) next_value = Q[(next_x, next_y)][next_max_command] tmp_Q[command]", "pb in minimum_pbs: tmp_Q = Q[pb.state] current_value = tmp_Q[pb.action] next_Q = Q[pb.next_state] next_max_command", "parameters file_name = 'default.txt' epoch_num = 1000 max_trial = 5000 gamma = 0.1", "> epsilon else np.random.choice(COMMAND) current_value = tmp_Q[command] # reward reward = sim(command) #", "sim.get_current() # select_command tmp_Q = Q[(current_x, current_y)] command = get_max_command(tmp_Q) if np.random.uniform() >", "get_max_command(Q[(next_x, next_y)]) next_value = Q[(next_x, next_y)][next_max_command] tmp_Q[command] += alpha * (reward + gamma", "= simplify(get_max_command(Q[k])) for this_line in ret: print(''.join(this_line)) if __name__ == '__main__': # parameters", "break # update with minimum_pbs if minimum_pbs is not None: for pb in", "for i in range(max_trial): # get current current_x, current_y = sim.get_current() # select_command", "(next_x, next_y), reward)) # end check if sim.end_episode(): print('find goal') epsilon *= 0.95", "in range(y): Q[(i, j)] = {_:np.random.normal() for _ in COMMAND} #Q[(i, j)] =", "epoch in range(epoch_num): sim.reset() this_pbs = PlayBacks() for i in range(max_trial): # get", "print('find goal') epsilon *= 0.95 if epsilon < 0.05: epsilon = 0.05 if", "in COMMAND} # main minimum_pbs = None for epoch in range(epoch_num): sim.reset() this_pbs", 
"current_x, current_y = sim.get_current() # select_command tmp_Q = Q[(current_x, current_y)] command = get_max_command(tmp_Q)", "if minimum_pbs is not None: for pb in minimum_pbs: tmp_Q = Q[pb.state] current_value", "= 1000 max_trial = 5000 gamma = 0.1 alpha = 0.1 epsilon =", "gamma * next_value - current_value) # play back this_pbs.append(PlayBack((current_x, current_y), command, (next_x, next_y),", "= 0.5 # make simulater sim = Simulater(file_name) # initialize Q value x,", "in range(x): for j in range(y): Q[(i, j)] = {_:np.random.normal() for _ in", "minimum_pbs = this_pbs elif len(minimum_pbs) > len(this_pbs): minimum_pbs = this_pbs print(epsilon) break #", "for i in range(x): for j in range(y): Q[(i, j)] = {_:np.random.normal() for", "get_max_command(target_dict): return max([(v,k) for k,v in target_dict.items()])[1] def simplify(command): return command[0] def print_Q(Q,", "= [] for i in range(y): ret.append(['0' for _ in range(x)]) for k", "reward)) # end check if sim.end_episode(): print('find goal') epsilon *= 0.95 if epsilon", "in COMMAND} #Q[(i, j)] = {_:0.0 for _ in COMMAND} # main minimum_pbs", "None: for pb in minimum_pbs: tmp_Q = Q[pb.state] current_value = tmp_Q[pb.action] next_Q =", "sim.end_episode(): print('find goal') epsilon *= 0.95 if epsilon < 0.05: epsilon = 0.05", "len(minimum_pbs) > len(this_pbs): minimum_pbs = this_pbs print(epsilon) break # update with minimum_pbs if", "np.random.choice(COMMAND) current_value = tmp_Q[command] # reward reward = sim(command) # update next_x, next_y", "import numpy as np from simulater import Simulater from play_back import PlayBack, PlayBacks", "sys import numpy as np from simulater import Simulater from play_back import PlayBack,", "get_max_command(tmp_Q) if np.random.uniform() > epsilon else np.random.choice(COMMAND) current_value = tmp_Q[command] # reward reward", "play back this_pbs.append(PlayBack((current_x, current_y), command, (next_x, next_y), reward)) # end check if sim.end_episode():", "= PlayBacks() for i in range(max_trial): # get current current_x, current_y = sim.get_current()", "Q value x, y = sim.map_size() Q = {} for i in range(x):", "epsilon = 0.05 if minimum_pbs is None: minimum_pbs = this_pbs elif len(minimum_pbs) >", "next_max_command = get_max_command(next_Q) next_value = next_Q[next_max_command] tmp_Q[pb.action] += alpha * (pb.reward + gamma", "# reward reward = sim(command) # update next_x, next_y = sim.get_current() next_max_command =", "simplify(get_max_command(Q[k])) for this_line in ret: print(''.join(this_line)) if __name__ == '__main__': # parameters file_name", "'default.txt' epoch_num = 1000 max_trial = 5000 gamma = 0.1 alpha = 0.1", "* (pb.reward + gamma * next_value - current_value) sim.printing() print('---') print_Q(Q, x, y)", "= this_pbs elif len(minimum_pbs) > len(this_pbs): minimum_pbs = this_pbs print(epsilon) break # update", "get_max_command(next_Q) next_value = next_Q[next_max_command] tmp_Q[pb.action] += alpha * (pb.reward + gamma * next_value", "0.1 alpha = 0.1 epsilon = 0.5 # make simulater sim = Simulater(file_name)", "0.95 if epsilon < 0.05: epsilon = 0.05 if minimum_pbs is None: minimum_pbs", "i in range(max_trial): # get current current_x, current_y = sim.get_current() # select_command tmp_Q", "numpy as np from simulater import Simulater from play_back import PlayBack, PlayBacks COMMAND", "check if sim.end_episode(): print('find goal') epsilon *= 0.95 if epsilon < 0.05: epsilon", "PlayBack, PlayBacks COMMAND = ['UP', 'DOWN', 'LEFT', 'RIGHT'] def get_max_command(target_dict): 
return max([(v,k) for", "'DOWN', 'LEFT', 'RIGHT'] def get_max_command(target_dict): return max([(v,k) for k,v in target_dict.items()])[1] def simplify(command):", "= sim.get_current() # select_command tmp_Q = Q[(current_x, current_y)] command = get_max_command(tmp_Q) if np.random.uniform()", "# main minimum_pbs = None for epoch in range(epoch_num): sim.reset() this_pbs = PlayBacks()", "i in range(x): for j in range(y): Q[(i, j)] = {_:np.random.normal() for _", "get current current_x, current_y = sim.get_current() # select_command tmp_Q = Q[(current_x, current_y)] command", "def simplify(command): return command[0] def print_Q(Q, x, y): ret = [] for i", "for i in range(y): ret.append(['0' for _ in range(x)]) for k in Q:", "< 0.05: epsilon = 0.05 if minimum_pbs is None: minimum_pbs = this_pbs elif", "# parameters file_name = 'default.txt' epoch_num = 1000 max_trial = 5000 gamma =", "None for epoch in range(epoch_num): sim.reset() this_pbs = PlayBacks() for i in range(max_trial):", "#Q[(i, j)] = {_:0.0 for _ in COMMAND} # main minimum_pbs = None", "= sim(command) # update next_x, next_y = sim.get_current() next_max_command = get_max_command(Q[(next_x, next_y)]) next_value", "tmp_Q[command] += alpha * (reward + gamma * next_value - current_value) # play", "current_value) # play back this_pbs.append(PlayBack((current_x, current_y), command, (next_x, next_y), reward)) # end check", "import os import sys import numpy as np from simulater import Simulater from", "simplify(command): return command[0] def print_Q(Q, x, y): ret = [] for i in", "np from simulater import Simulater from play_back import PlayBack, PlayBacks COMMAND = ['UP',", "-*- coding:utf-8 -*- import os import sys import numpy as np from simulater", "Q[(i, j)] = {_:np.random.normal() for _ in COMMAND} #Q[(i, j)] = {_:0.0 for", "5000 gamma = 0.1 alpha = 0.1 epsilon = 0.5 # make simulater", "main minimum_pbs = None for epoch in range(epoch_num): sim.reset() this_pbs = PlayBacks() for", "{} for i in range(x): for j in range(y): Q[(i, j)] = {_:np.random.normal()", "simulater import Simulater from play_back import PlayBack, PlayBacks COMMAND = ['UP', 'DOWN', 'LEFT',", "minimum_pbs is None: minimum_pbs = this_pbs elif len(minimum_pbs) > len(this_pbs): minimum_pbs = this_pbs", "end check if sim.end_episode(): print('find goal') epsilon *= 0.95 if epsilon < 0.05:", "* (reward + gamma * next_value - current_value) # play back this_pbs.append(PlayBack((current_x, current_y),", "this_line in ret: print(''.join(this_line)) if __name__ == '__main__': # parameters file_name = 'default.txt'", "print(epsilon) break # update with minimum_pbs if minimum_pbs is not None: for pb", "update with minimum_pbs if minimum_pbs is not None: for pb in minimum_pbs: tmp_Q", "not None: for pb in minimum_pbs: tmp_Q = Q[pb.state] current_value = tmp_Q[pb.action] next_Q", "else np.random.choice(COMMAND) current_value = tmp_Q[command] # reward reward = sim(command) # update next_x,", "= get_max_command(Q[(next_x, next_y)]) next_value = Q[(next_x, next_y)][next_max_command] tmp_Q[command] += alpha * (reward +", "for k in Q: ret[k[1]][k[0]] = simplify(get_max_command(Q[k])) for this_line in ret: print(''.join(this_line)) if", "== '__main__': # parameters file_name = 'default.txt' epoch_num = 1000 max_trial = 5000", "{_:0.0 for _ in COMMAND} # main minimum_pbs = None for epoch in", "this_pbs = PlayBacks() for i in range(max_trial): # get current current_x, current_y =", "next_y), reward)) # end check if sim.end_episode(): print('find goal') epsilon *= 0.95 if", 
"sim.reset() this_pbs = PlayBacks() for i in range(max_trial): # get current current_x, current_y", "print(''.join(this_line)) if __name__ == '__main__': # parameters file_name = 'default.txt' epoch_num = 1000", "play_back import PlayBack, PlayBacks COMMAND = ['UP', 'DOWN', 'LEFT', 'RIGHT'] def get_max_command(target_dict): return", "['UP', 'DOWN', 'LEFT', 'RIGHT'] def get_max_command(target_dict): return max([(v,k) for k,v in target_dict.items()])[1] def", "max_trial = 5000 gamma = 0.1 alpha = 0.1 epsilon = 0.5 #", "alpha = 0.1 epsilon = 0.5 # make simulater sim = Simulater(file_name) #", "coding:utf-8 -*- import os import sys import numpy as np from simulater import", "update next_x, next_y = sim.get_current() next_max_command = get_max_command(Q[(next_x, next_y)]) next_value = Q[(next_x, next_y)][next_max_command]", "= sim.get_current() next_max_command = get_max_command(Q[(next_x, next_y)]) next_value = Q[(next_x, next_y)][next_max_command] tmp_Q[command] += alpha", "= Simulater(file_name) # initialize Q value x, y = sim.map_size() Q = {}", "= {_:np.random.normal() for _ in COMMAND} #Q[(i, j)] = {_:0.0 for _ in", "0.05: epsilon = 0.05 if minimum_pbs is None: minimum_pbs = this_pbs elif len(minimum_pbs)", "minimum_pbs if minimum_pbs is not None: for pb in minimum_pbs: tmp_Q = Q[pb.state]", "y): ret = [] for i in range(y): ret.append(['0' for _ in range(x)])", "= 5000 gamma = 0.1 alpha = 0.1 epsilon = 0.5 # make", "# play back this_pbs.append(PlayBack((current_x, current_y), command, (next_x, next_y), reward)) # end check if", "np.random.uniform() > epsilon else np.random.choice(COMMAND) current_value = tmp_Q[command] # reward reward = sim(command)", "from play_back import PlayBack, PlayBacks COMMAND = ['UP', 'DOWN', 'LEFT', 'RIGHT'] def get_max_command(target_dict):", "next_value - current_value) # play back this_pbs.append(PlayBack((current_x, current_y), command, (next_x, next_y), reward)) #", "sim.map_size() Q = {} for i in range(x): for j in range(y): Q[(i,", "0.1 epsilon = 0.5 # make simulater sim = Simulater(file_name) # initialize Q", "in minimum_pbs: tmp_Q = Q[pb.state] current_value = tmp_Q[pb.action] next_Q = Q[pb.next_state] next_max_command =", "range(y): ret.append(['0' for _ in range(x)]) for k in Q: ret[k[1]][k[0]] = simplify(get_max_command(Q[k]))", "ret: print(''.join(this_line)) if __name__ == '__main__': # parameters file_name = 'default.txt' epoch_num =", "command, (next_x, next_y), reward)) # end check if sim.end_episode(): print('find goal') epsilon *=", "tmp_Q = Q[pb.state] current_value = tmp_Q[pb.action] next_Q = Q[pb.next_state] next_max_command = get_max_command(next_Q) next_value", "goal') epsilon *= 0.95 if epsilon < 0.05: epsilon = 0.05 if minimum_pbs", "= 'default.txt' epoch_num = 1000 max_trial = 5000 gamma = 0.1 alpha =", "= this_pbs print(epsilon) break # update with minimum_pbs if minimum_pbs is not None:", "in range(x)]) for k in Q: ret[k[1]][k[0]] = simplify(get_max_command(Q[k])) for this_line in ret:", "in Q: ret[k[1]][k[0]] = simplify(get_max_command(Q[k])) for this_line in ret: print(''.join(this_line)) if __name__ ==" ]
[ "as workalendar returns only Date objects holiday_current = [datetime(dt.year, dt.month, dt.day) for dt", "in holiday_current_raw] holiday_next = [datetime(dt.year, dt.month, dt.day) for dt in holiday_next_raw] return holiday_current", "override. You can override holidays for a country and a year through usage", "sample: zh: 2016: '2016-01-01': 'New Years Day' '2016-02-07': 'Chinese New Years Eve' \"\"\"", "1 # retrieve Dates from workalendar holiday_current_raw = [dt for dt, _ in", "cal = klass() current_year = year or datetime.now().year next_year = current_year + 1", "import time import logging import calendar from datetime import datetime from workalendar.europe import", "1000 def get_holiday(user, year=None, use_datetime=False): \"\"\" return holidays for user country format is", "timestamp holiday_current = [utcify(dt) for dt in holiday_current_raw] holiday_next = [utcify(dt) for dt", "holiday_current_raw = [datetime.strptime(dt, '%Y-%m-%d') for dt in override[user.country][current_year]] if user.country in override and", "if user.country in override and next_year in override[user.country]: holiday_next_raw = [datetime.strptime(dt, '%Y-%m-%d') for", "override and next_year in override[user.country]: holiday_next_raw = [datetime.strptime(dt, '%Y-%m-%d') for dt in override[user.country][next_year]]", "cast to datetime as workalendar returns only Date objects holiday_current = [datetime(dt.year, dt.month,", "logging.getLogger(__file__) conv_table = { 'fr': France, 'us': California, 'zh': Taiwan, 'lu': Luxembourg, }", "in override and next_year in override[user.country]: holiday_next_raw = [datetime.strptime(dt, '%Y-%m-%d') for dt in", "dt.month, dt.day) for dt in holiday_current_raw] holiday_next = [datetime(dt.year, dt.month, dt.day) for dt", "return holidays for user country format is unixtime for javascript \"\"\" klass =", "holiday_next = [utcify(dt) for dt in holiday_next_raw] else: # must cast to datetime", "retrieve Dates from workalendar holiday_current_raw = [dt for dt, _ in cal.holidays(current_year)] holiday_next_raw", "You can override holidays for a country and a year through usage of", "return override.update(content) def utcify(date): \"\"\" return an UTC datetime from a Date object", "and a year through usage of a configuration setting: pyvac.override_holidays_file = %(here)s/conf/holidays.yaml here", "Luxembourg, } override = {} def init_override(content): \"\"\"Load a yaml file for holidays", "conv_table = { 'fr': France, 'us': California, 'zh': Taiwan, 'lu': Luxembourg, } override", "a yaml file for holidays override. 
You can override holidays for a country", "datetime import datetime from workalendar.europe import France, Luxembourg from workalendar.usa import California from", "workalendar holiday_current_raw = [dt for dt, _ in cal.holidays(current_year)] holiday_next_raw = [dt for", "_ in cal.holidays(next_year)] if user.country in override and current_year in override[user.country]: holiday_current_raw =", "cal.holidays(current_year)] holiday_next_raw = [dt for dt, _ in cal.holidays(next_year)] if user.country in override", "# import time import logging import calendar from datetime import datetime from workalendar.europe", "[utcify(dt) for dt in holiday_current_raw] holiday_next = [utcify(dt) for dt in holiday_next_raw] else:", "of a configuration setting: pyvac.override_holidays_file = %(here)s/conf/holidays.yaml here is a sample: zh: 2016:", "for dt in override[user.country][next_year]] if not use_datetime: # must cast to javascript timestamp", "year=None, use_datetime=False): \"\"\" return holidays for user country format is unixtime for javascript", "= [utcify(dt) for dt in holiday_current_raw] holiday_next = [utcify(dt) for dt in holiday_next_raw]", "def get_holiday(user, year=None, use_datetime=False): \"\"\" return holidays for user country format is unixtime", "workalendar.asia import Taiwan log = logging.getLogger(__file__) conv_table = { 'fr': France, 'us': California,", "'New Years Day' '2016-02-07': 'Chinese New Years Eve' \"\"\" if not content: return", "an UTC datetime from a Date object \"\"\" return calendar.timegm(date.timetuple()) * 1000 def", "use_datetime=False): \"\"\" return holidays for user country format is unixtime for javascript \"\"\"", "override = {} def init_override(content): \"\"\"Load a yaml file for holidays override. You", "utcify(date): \"\"\" return an UTC datetime from a Date object \"\"\" return calendar.timegm(date.timetuple())", "in override[user.country]: holiday_current_raw = [datetime.strptime(dt, '%Y-%m-%d') for dt in override[user.country][current_year]] if user.country in", "in override[user.country]: holiday_next_raw = [datetime.strptime(dt, '%Y-%m-%d') for dt in override[user.country][next_year]] if not use_datetime:", "datetime as workalendar returns only Date objects holiday_current = [datetime(dt.year, dt.month, dt.day) for", "import calendar from datetime import datetime from workalendar.europe import France, Luxembourg from workalendar.usa", "not use_datetime: # must cast to javascript timestamp holiday_current = [utcify(dt) for dt", "\"\"\" return an UTC datetime from a Date object \"\"\" return calendar.timegm(date.timetuple()) *", "California, 'zh': Taiwan, 'lu': Luxembourg, } override = {} def init_override(content): \"\"\"Load a", "holiday_current_raw = [dt for dt, _ in cal.holidays(current_year)] holiday_next_raw = [dt for dt,", "= [utcify(dt) for dt in holiday_next_raw] else: # must cast to datetime as", "\"\"\" klass = conv_table[user.country] cal = klass() current_year = year or datetime.now().year next_year", "in override[user.country][current_year]] if user.country in override and next_year in override[user.country]: holiday_next_raw = [datetime.strptime(dt,", "in override[user.country][next_year]] if not use_datetime: # must cast to javascript timestamp holiday_current =", "= logging.getLogger(__file__) conv_table = { 'fr': France, 'us': California, 'zh': Taiwan, 'lu': Luxembourg,", "content: return override.update(content) def utcify(date): \"\"\" return an UTC datetime from a Date", "= [datetime(dt.year, dt.month, dt.day) for dt in 
holiday_current_raw] holiday_next = [datetime(dt.year, dt.month, dt.day)", "country format is unixtime for javascript \"\"\" klass = conv_table[user.country] cal = klass()", "workalendar.europe import France, Luxembourg from workalendar.usa import California from workalendar.asia import Taiwan log", "from datetime import datetime from workalendar.europe import France, Luxembourg from workalendar.usa import California", "a sample: zh: 2016: '2016-01-01': 'New Years Day' '2016-02-07': 'Chinese New Years Eve'", "UTC datetime from a Date object \"\"\" return calendar.timegm(date.timetuple()) * 1000 def get_holiday(user,", "France, 'us': California, 'zh': Taiwan, 'lu': Luxembourg, } override = {} def init_override(content):", "for dt in holiday_next_raw] else: # must cast to datetime as workalendar returns", "or datetime.now().year next_year = current_year + 1 # retrieve Dates from workalendar holiday_current_raw", "datetime from a Date object \"\"\" return calendar.timegm(date.timetuple()) * 1000 def get_holiday(user, year=None,", "format is unixtime for javascript \"\"\" klass = conv_table[user.country] cal = klass() current_year", "must cast to javascript timestamp holiday_current = [utcify(dt) for dt in holiday_current_raw] holiday_next", "for javascript \"\"\" klass = conv_table[user.country] cal = klass() current_year = year or", "override holidays for a country and a year through usage of a configuration", "unixtime for javascript \"\"\" klass = conv_table[user.country] cal = klass() current_year = year", "user.country in override and next_year in override[user.country]: holiday_next_raw = [datetime.strptime(dt, '%Y-%m-%d') for dt", "from a Date object \"\"\" return calendar.timegm(date.timetuple()) * 1000 def get_holiday(user, year=None, use_datetime=False):", "javascript timestamp holiday_current = [utcify(dt) for dt in holiday_current_raw] holiday_next = [utcify(dt) for", "import California from workalendar.asia import Taiwan log = logging.getLogger(__file__) conv_table = { 'fr':", "if user.country in override and current_year in override[user.country]: holiday_current_raw = [datetime.strptime(dt, '%Y-%m-%d') for", "must cast to datetime as workalendar returns only Date objects holiday_current = [datetime(dt.year,", "= klass() current_year = year or datetime.now().year next_year = current_year + 1 #", "dt in override[user.country][next_year]] if not use_datetime: # must cast to javascript timestamp holiday_current", "holiday_current_raw] holiday_next = [utcify(dt) for dt in holiday_next_raw] else: # must cast to", "import France, Luxembourg from workalendar.usa import California from workalendar.asia import Taiwan log =", "Eve' \"\"\" if not content: return override.update(content) def utcify(date): \"\"\" return an UTC", "= {} def init_override(content): \"\"\"Load a yaml file for holidays override. 
You can", "to javascript timestamp holiday_current = [utcify(dt) for dt in holiday_current_raw] holiday_next = [utcify(dt)", "country and a year through usage of a configuration setting: pyvac.override_holidays_file = %(here)s/conf/holidays.yaml", "log = logging.getLogger(__file__) conv_table = { 'fr': France, 'us': California, 'zh': Taiwan, 'lu':", "override and current_year in override[user.country]: holiday_current_raw = [datetime.strptime(dt, '%Y-%m-%d') for dt in override[user.country][current_year]]", "%(here)s/conf/holidays.yaml here is a sample: zh: 2016: '2016-01-01': 'New Years Day' '2016-02-07': 'Chinese", "import Taiwan log = logging.getLogger(__file__) conv_table = { 'fr': France, 'us': California, 'zh':", "object \"\"\" return calendar.timegm(date.timetuple()) * 1000 def get_holiday(user, year=None, use_datetime=False): \"\"\" return holidays", "Years Eve' \"\"\" if not content: return override.update(content) def utcify(date): \"\"\" return an", "else: # must cast to datetime as workalendar returns only Date objects holiday_current", "time import logging import calendar from datetime import datetime from workalendar.europe import France,", "def init_override(content): \"\"\"Load a yaml file for holidays override. You can override holidays", "[dt for dt, _ in cal.holidays(current_year)] holiday_next_raw = [dt for dt, _ in", "dt in holiday_next_raw] else: # must cast to datetime as workalendar returns only", "_ in cal.holidays(current_year)] holiday_next_raw = [dt for dt, _ in cal.holidays(next_year)] if user.country", "in cal.holidays(next_year)] if user.country in override and current_year in override[user.country]: holiday_current_raw = [datetime.strptime(dt,", "holiday_next_raw = [datetime.strptime(dt, '%Y-%m-%d') for dt in override[user.country][next_year]] if not use_datetime: # must", "* 1000 def get_holiday(user, year=None, use_datetime=False): \"\"\" return holidays for user country format", "override[user.country][next_year]] if not use_datetime: # must cast to javascript timestamp holiday_current = [utcify(dt)", "klass = conv_table[user.country] cal = klass() current_year = year or datetime.now().year next_year =", "# must cast to javascript timestamp holiday_current = [utcify(dt) for dt in holiday_current_raw]", "dt in holiday_current_raw] holiday_next = [utcify(dt) for dt in holiday_next_raw] else: # must", "dt in override[user.country][current_year]] if user.country in override and next_year in override[user.country]: holiday_next_raw =", "return calendar.timegm(date.timetuple()) * 1000 def get_holiday(user, year=None, use_datetime=False): \"\"\" return holidays for user", "holiday_current = [datetime(dt.year, dt.month, dt.day) for dt in holiday_current_raw] holiday_next = [datetime(dt.year, dt.month,", "dt, _ in cal.holidays(current_year)] holiday_next_raw = [dt for dt, _ in cal.holidays(next_year)] if", "setting: pyvac.override_holidays_file = %(here)s/conf/holidays.yaml here is a sample: zh: 2016: '2016-01-01': 'New Years", "+ 1 # retrieve Dates from workalendar holiday_current_raw = [dt for dt, _", "\"\"\" if not content: return override.update(content) def utcify(date): \"\"\" return an UTC datetime", "holidays for a country and a year through usage of a configuration setting:", "[datetime.strptime(dt, '%Y-%m-%d') for dt in override[user.country][current_year]] if user.country in override and next_year in", "in holiday_next_raw] else: # must cast to datetime as workalendar returns only Date", "\"\"\"Load a yaml file for holidays override. 
You can override holidays for a", "if not use_datetime: # must cast to javascript timestamp holiday_current = [utcify(dt) for", "Date object \"\"\" return calendar.timegm(date.timetuple()) * 1000 def get_holiday(user, year=None, use_datetime=False): \"\"\" return", "= [datetime.strptime(dt, '%Y-%m-%d') for dt in override[user.country][next_year]] if not use_datetime: # must cast", "holidays override. You can override holidays for a country and a year through", "'lu': Luxembourg, } override = {} def init_override(content): \"\"\"Load a yaml file for", "from workalendar holiday_current_raw = [dt for dt, _ in cal.holidays(current_year)] holiday_next_raw = [dt", "2016: '2016-01-01': 'New Years Day' '2016-02-07': 'Chinese New Years Eve' \"\"\" if not", "a country and a year through usage of a configuration setting: pyvac.override_holidays_file =", "<reponame>sayoun/pyvac # import time import logging import calendar from datetime import datetime from", "javascript \"\"\" klass = conv_table[user.country] cal = klass() current_year = year or datetime.now().year", "= current_year + 1 # retrieve Dates from workalendar holiday_current_raw = [dt for", "objects holiday_current = [datetime(dt.year, dt.month, dt.day) for dt in holiday_current_raw] holiday_next = [datetime(dt.year,", "for dt, _ in cal.holidays(next_year)] if user.country in override and current_year in override[user.country]:", "get_holiday(user, year=None, use_datetime=False): \"\"\" return holidays for user country format is unixtime for", "= [dt for dt, _ in cal.holidays(current_year)] holiday_next_raw = [dt for dt, _", "'2016-01-01': 'New Years Day' '2016-02-07': 'Chinese New Years Eve' \"\"\" if not content:", "workalendar returns only Date objects holiday_current = [datetime(dt.year, dt.month, dt.day) for dt in", "datetime from workalendar.europe import France, Luxembourg from workalendar.usa import California from workalendar.asia import", "Years Day' '2016-02-07': 'Chinese New Years Eve' \"\"\" if not content: return override.update(content)", "and next_year in override[user.country]: holiday_next_raw = [datetime.strptime(dt, '%Y-%m-%d') for dt in override[user.country][next_year]] if", "= %(here)s/conf/holidays.yaml here is a sample: zh: 2016: '2016-01-01': 'New Years Day' '2016-02-07':", "[datetime.strptime(dt, '%Y-%m-%d') for dt in override[user.country][next_year]] if not use_datetime: # must cast to", "in holiday_current_raw] holiday_next = [utcify(dt) for dt in holiday_next_raw] else: # must cast", "user country format is unixtime for javascript \"\"\" klass = conv_table[user.country] cal =", "France, Luxembourg from workalendar.usa import California from workalendar.asia import Taiwan log = logging.getLogger(__file__)", "next_year = current_year + 1 # retrieve Dates from workalendar holiday_current_raw = [dt", "for a country and a year through usage of a configuration setting: pyvac.override_holidays_file", "returns only Date objects holiday_current = [datetime(dt.year, dt.month, dt.day) for dt in holiday_current_raw]", "configuration setting: pyvac.override_holidays_file = %(here)s/conf/holidays.yaml here is a sample: zh: 2016: '2016-01-01': 'New", "only Date objects holiday_current = [datetime(dt.year, dt.month, dt.day) for dt in holiday_current_raw] holiday_next", "datetime.now().year next_year = current_year + 1 # retrieve Dates from workalendar holiday_current_raw =", "holiday_current_raw] holiday_next = [datetime(dt.year, dt.month, dt.day) for dt in holiday_next_raw] return holiday_current +", "{ 'fr': France, 'us': 
California, 'zh': Taiwan, 'lu': Luxembourg, } override = {}", "Luxembourg from workalendar.usa import California from workalendar.asia import Taiwan log = logging.getLogger(__file__) conv_table", "calendar.timegm(date.timetuple()) * 1000 def get_holiday(user, year=None, use_datetime=False): \"\"\" return holidays for user country", "next_year in override[user.country]: holiday_next_raw = [datetime.strptime(dt, '%Y-%m-%d') for dt in override[user.country][next_year]] if not", "Day' '2016-02-07': 'Chinese New Years Eve' \"\"\" if not content: return override.update(content) def", "file for holidays override. You can override holidays for a country and a", "override[user.country]: holiday_next_raw = [datetime.strptime(dt, '%Y-%m-%d') for dt in override[user.country][next_year]] if not use_datetime: #", "to datetime as workalendar returns only Date objects holiday_current = [datetime(dt.year, dt.month, dt.day)", "} override = {} def init_override(content): \"\"\"Load a yaml file for holidays override.", "conv_table[user.country] cal = klass() current_year = year or datetime.now().year next_year = current_year +", "'Chinese New Years Eve' \"\"\" if not content: return override.update(content) def utcify(date): \"\"\"", "override.update(content) def utcify(date): \"\"\" return an UTC datetime from a Date object \"\"\"", "def utcify(date): \"\"\" return an UTC datetime from a Date object \"\"\" return", "import datetime from workalendar.europe import France, Luxembourg from workalendar.usa import California from workalendar.asia", "for holidays override. You can override holidays for a country and a year", "in cal.holidays(current_year)] holiday_next_raw = [dt for dt, _ in cal.holidays(next_year)] if user.country in", "current_year = year or datetime.now().year next_year = current_year + 1 # retrieve Dates", "override[user.country][current_year]] if user.country in override and next_year in override[user.country]: holiday_next_raw = [datetime.strptime(dt, '%Y-%m-%d')", "{} def init_override(content): \"\"\"Load a yaml file for holidays override. 
You can override", "is a sample: zh: 2016: '2016-01-01': 'New Years Day' '2016-02-07': 'Chinese New Years", "Dates from workalendar holiday_current_raw = [dt for dt, _ in cal.holidays(current_year)] holiday_next_raw =", "'2016-02-07': 'Chinese New Years Eve' \"\"\" if not content: return override.update(content) def utcify(date):", "zh: 2016: '2016-01-01': 'New Years Day' '2016-02-07': 'Chinese New Years Eve' \"\"\" if", "for dt in holiday_current_raw] holiday_next = [utcify(dt) for dt in holiday_next_raw] else: #", "# must cast to datetime as workalendar returns only Date objects holiday_current =", "from workalendar.europe import France, Luxembourg from workalendar.usa import California from workalendar.asia import Taiwan", "through usage of a configuration setting: pyvac.override_holidays_file = %(here)s/conf/holidays.yaml here is a sample:", "a Date object \"\"\" return calendar.timegm(date.timetuple()) * 1000 def get_holiday(user, year=None, use_datetime=False): \"\"\"", "cast to javascript timestamp holiday_current = [utcify(dt) for dt in holiday_current_raw] holiday_next =", "user.country in override and current_year in override[user.country]: holiday_current_raw = [datetime.strptime(dt, '%Y-%m-%d') for dt", "[utcify(dt) for dt in holiday_next_raw] else: # must cast to datetime as workalendar", "'fr': France, 'us': California, 'zh': Taiwan, 'lu': Luxembourg, } override = {} def", "use_datetime: # must cast to javascript timestamp holiday_current = [utcify(dt) for dt in", "init_override(content): \"\"\"Load a yaml file for holidays override. You can override holidays for", "New Years Eve' \"\"\" if not content: return override.update(content) def utcify(date): \"\"\" return", "current_year in override[user.country]: holiday_current_raw = [datetime.strptime(dt, '%Y-%m-%d') for dt in override[user.country][current_year]] if user.country", "for dt in holiday_current_raw] holiday_next = [datetime(dt.year, dt.month, dt.day) for dt in holiday_next_raw]", "from workalendar.usa import California from workalendar.asia import Taiwan log = logging.getLogger(__file__) conv_table =", "import logging import calendar from datetime import datetime from workalendar.europe import France, Luxembourg", "from workalendar.asia import Taiwan log = logging.getLogger(__file__) conv_table = { 'fr': France, 'us':", "in override and current_year in override[user.country]: holiday_current_raw = [datetime.strptime(dt, '%Y-%m-%d') for dt in", "return an UTC datetime from a Date object \"\"\" return calendar.timegm(date.timetuple()) * 1000", "cal.holidays(next_year)] if user.country in override and current_year in override[user.country]: holiday_current_raw = [datetime.strptime(dt, '%Y-%m-%d')", "workalendar.usa import California from workalendar.asia import Taiwan log = logging.getLogger(__file__) conv_table = {", "holiday_next = [datetime(dt.year, dt.month, dt.day) for dt in holiday_next_raw] return holiday_current + holiday_next", "not content: return override.update(content) def utcify(date): \"\"\" return an UTC datetime from a", "for dt, _ in cal.holidays(current_year)] holiday_next_raw = [dt for dt, _ in cal.holidays(next_year)]", "'%Y-%m-%d') for dt in override[user.country][next_year]] if not use_datetime: # must cast to javascript", "a configuration setting: pyvac.override_holidays_file = %(here)s/conf/holidays.yaml here is a sample: zh: 2016: '2016-01-01':", "= [dt for dt, _ in cal.holidays(next_year)] if user.country in override and current_year", "'zh': Taiwan, 'lu': Luxembourg, } override = {} def 
init_override(content): \"\"\"Load a yaml", "override[user.country]: holiday_current_raw = [datetime.strptime(dt, '%Y-%m-%d') for dt in override[user.country][current_year]] if user.country in override", "current_year + 1 # retrieve Dates from workalendar holiday_current_raw = [dt for dt,", "[datetime(dt.year, dt.month, dt.day) for dt in holiday_current_raw] holiday_next = [datetime(dt.year, dt.month, dt.day) for", "Taiwan, 'lu': Luxembourg, } override = {} def init_override(content): \"\"\"Load a yaml file", "[dt for dt, _ in cal.holidays(next_year)] if user.country in override and current_year in", "calendar from datetime import datetime from workalendar.europe import France, Luxembourg from workalendar.usa import", "California from workalendar.asia import Taiwan log = logging.getLogger(__file__) conv_table = { 'fr': France,", "\"\"\" return calendar.timegm(date.timetuple()) * 1000 def get_holiday(user, year=None, use_datetime=False): \"\"\" return holidays for", "dt.day) for dt in holiday_current_raw] holiday_next = [datetime(dt.year, dt.month, dt.day) for dt in", "if not content: return override.update(content) def utcify(date): \"\"\" return an UTC datetime from", "holidays for user country format is unixtime for javascript \"\"\" klass = conv_table[user.country]", "# retrieve Dates from workalendar holiday_current_raw = [dt for dt, _ in cal.holidays(current_year)]", "usage of a configuration setting: pyvac.override_holidays_file = %(here)s/conf/holidays.yaml here is a sample: zh:", "here is a sample: zh: 2016: '2016-01-01': 'New Years Day' '2016-02-07': 'Chinese New", "and current_year in override[user.country]: holiday_current_raw = [datetime.strptime(dt, '%Y-%m-%d') for dt in override[user.country][current_year]] if", "for dt in override[user.country][current_year]] if user.country in override and next_year in override[user.country]: holiday_next_raw", "can override holidays for a country and a year through usage of a", "= { 'fr': France, 'us': California, 'zh': Taiwan, 'lu': Luxembourg, } override =", "= year or datetime.now().year next_year = current_year + 1 # retrieve Dates from", "= [datetime.strptime(dt, '%Y-%m-%d') for dt in override[user.country][current_year]] if user.country in override and next_year", "\"\"\" return holidays for user country format is unixtime for javascript \"\"\" klass", "logging import calendar from datetime import datetime from workalendar.europe import France, Luxembourg from", "Taiwan log = logging.getLogger(__file__) conv_table = { 'fr': France, 'us': California, 'zh': Taiwan,", "yaml file for holidays override. 
You can override holidays for a country and", "pyvac.override_holidays_file = %(here)s/conf/holidays.yaml here is a sample: zh: 2016: '2016-01-01': 'New Years Day'", "year or datetime.now().year next_year = current_year + 1 # retrieve Dates from workalendar", "holiday_current = [utcify(dt) for dt in holiday_current_raw] holiday_next = [utcify(dt) for dt in", "Date objects holiday_current = [datetime(dt.year, dt.month, dt.day) for dt in holiday_current_raw] holiday_next =", "year through usage of a configuration setting: pyvac.override_holidays_file = %(here)s/conf/holidays.yaml here is a", "for user country format is unixtime for javascript \"\"\" klass = conv_table[user.country] cal", "klass() current_year = year or datetime.now().year next_year = current_year + 1 # retrieve", "dt in holiday_current_raw] holiday_next = [datetime(dt.year, dt.month, dt.day) for dt in holiday_next_raw] return", "'%Y-%m-%d') for dt in override[user.country][current_year]] if user.country in override and next_year in override[user.country]:", "a year through usage of a configuration setting: pyvac.override_holidays_file = %(here)s/conf/holidays.yaml here is", "= conv_table[user.country] cal = klass() current_year = year or datetime.now().year next_year = current_year", "holiday_next_raw] else: # must cast to datetime as workalendar returns only Date objects", "is unixtime for javascript \"\"\" klass = conv_table[user.country] cal = klass() current_year =", "holiday_next_raw = [dt for dt, _ in cal.holidays(next_year)] if user.country in override and", "'us': California, 'zh': Taiwan, 'lu': Luxembourg, } override = {} def init_override(content): \"\"\"Load", "dt, _ in cal.holidays(next_year)] if user.country in override and current_year in override[user.country]: holiday_current_raw" ]
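The row above slices a workalendar-based holiday helper into overlapping 13-grams (the fragments mention pyvac.override_holidays_file, so it appears to come from pyvac's holidays module). For readability, a best-effort reassembly of that module from the fragments follows; the joins between fragments, formatting, and the exact calling conventions are inferred, so treat it as a sketch rather than verbatim source.

# Sketch reassembled from the n-gram fragments above (not part of the dataset row).
import logging
import calendar
from datetime import datetime

from workalendar.europe import France, Luxembourg
from workalendar.usa import California
from workalendar.asia import Taiwan

log = logging.getLogger(__file__)

# country code -> workalendar calendar class
conv_table = {
    'fr': France,
    'us': California,
    'zh': Taiwan,
    'lu': Luxembourg,
}

# holiday overrides loaded from a YAML file, keyed by country then year
override = {}


def init_override(content):
    """Load a yaml file for holidays override.

    You can override holidays for a country and a year through usage of a
    configuration setting: pyvac.override_holidays_file = %(here)s/conf/holidays.yaml

    here is a sample:
    zh:
      2016:
        '2016-01-01': 'New Years Day'
        '2016-02-07': 'Chinese New Years Eve'
    """
    if not content:
        return
    override.update(content)


def utcify(date):
    """return an UTC datetime from a Date object"""
    # milliseconds since the epoch, i.e. a javascript-style timestamp
    return calendar.timegm(date.timetuple()) * 1000


def get_holiday(user, year=None, use_datetime=False):
    """return holidays for user country

    format is unixtime for javascript
    """
    klass = conv_table[user.country]
    cal = klass()
    current_year = year or datetime.now().year
    next_year = current_year + 1
    # retrieve Dates from workalendar
    holiday_current_raw = [dt for dt, _ in cal.holidays(current_year)]
    holiday_next_raw = [dt for dt, _ in cal.holidays(next_year)]
    # apply per-country/per-year overrides when configured
    if user.country in override and current_year in override[user.country]:
        holiday_current_raw = [datetime.strptime(dt, '%Y-%m-%d')
                               for dt in override[user.country][current_year]]
    if user.country in override and next_year in override[user.country]:
        holiday_next_raw = [datetime.strptime(dt, '%Y-%m-%d')
                            for dt in override[user.country][next_year]]
    if not use_datetime:
        # must cast to javascript timestamp
        holiday_current = [utcify(dt) for dt in holiday_current_raw]
        holiday_next = [utcify(dt) for dt in holiday_next_raw]
    else:
        # must cast to datetime as workalendar returns only Date objects
        holiday_current = [datetime(dt.year, dt.month, dt.day) for dt in holiday_current_raw]
        holiday_next = [datetime(dt.year, dt.month, dt.day) for dt in holiday_next_raw]
    return holiday_current + holiday_next

Judging by the fragments, init_override() expects an already-parsed YAML mapping (country -> year -> {date: label}) and get_holiday() expects a user object whose country attribute matches a conv_table key; both conventions are inferred rather than shown verbatim in the n-grams.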
[ "** y_true - 1 discounts = np.log2(np.arange(len(y_true)) + 2) return np.sum(gains / discounts)", "if word in word_dict: emb_dict[word] = vec emb_table = [0]*len(word_dict) dummy = np.zeros(300,dtype='float32')", "dcg_score(y_true, y_score, k=10): order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) gains = 2", "def auc(label,score): label=np.array(label) score=np.array(score) false_score = score[label==0] positive_score = score[label==1] num_positive = (label==1).sum()", "np.take(y_true, order) rr_score = y_true / (np.arange(len(y_true)) + 1) return np.sum(rr_score) / np.sum(y_true)", "score=np.array(score) false_score = score[label==0] positive_score = score[label==1] num_positive = (label==1).sum() num_negative = (label==0).sum()", "axis=0) Sigma = np.cov(all_emb.T) norm = np.random.multivariate_normal(mu, Sigma, 1) for i in range(len(emb_table)):", "and x<0.2: return 2 elif x>=0.2 and x<0.6: return 3 elif x>=0.6: return", "emb_dict: emb_table[word_dict[i][0]] = np.array(emb_dict[i],dtype='float32') all_emb.append(emb_table[word_dict[i][0]]) all_emb = np.array(all_emb,dtype='float32') mu = np.mean(all_emb, axis=0) Sigma", "emb_table[word_dict[i][0]] = np.array(emb_dict[i],dtype='float32') all_emb.append(emb_table[word_dict[i][0]]) all_emb = np.array(all_emb,dtype='float32') mu = np.mean(all_emb, axis=0) Sigma =", "rr_score = y_true / (np.arange(len(y_true)) + 1) return np.sum(rr_score) / np.sum(y_true) def auc(label,score):", "np.array(emb_dict[i],dtype='float32') all_emb.append(emb_table[word_dict[i][0]]) all_emb = np.array(all_emb,dtype='float32') mu = np.mean(all_emb, axis=0) Sigma = np.cov(all_emb.T) norm", "all_emb.append(emb_table[word_dict[i][0]]) all_emb = np.array(all_emb,dtype='float32') mu = np.mean(all_emb, axis=0) Sigma = np.cov(all_emb.T) norm =", "open(embfile,'rb')as f: while True: line = f.readline() if len(line) == 0: break data", "x<0.2: return 2 elif x>=0.2 and x<0.6: return 3 elif x>=0.6: return 4", "in word_dict: emb_dict[word] = vec emb_table = [0]*len(word_dict) dummy = np.zeros(300,dtype='float32') all_emb =", "import cholesky import numpy as np def senti2cate(x): if x<=-0.6: return 0 elif", "3 elif x>=0.6: return 4 def dcg_score(y_true, y_score, k=10): order = np.argsort(y_score)[::-1] y_true", "mu = np.mean(all_emb, axis=0) Sigma = np.cov(all_emb.T) norm = np.random.multivariate_normal(mu, Sigma, 1) for", "i in emb_dict: emb_table[word_dict[i][0]] = np.array(emb_dict[i],dtype='float32') all_emb.append(emb_table[word_dict[i][0]]) all_emb = np.array(all_emb,dtype='float32') mu = np.mean(all_emb,", "= dcg_score(y_true, y_score, k) return actual / best def mrr_score(y_true, y_score): order =", "for i in emb_dict: emb_table[word_dict[i][0]] = np.array(emb_dict[i],dtype='float32') all_emb.append(emb_table[word_dict[i][0]]) all_emb = np.array(all_emb,dtype='float32') mu =", "= np.log2(np.arange(len(y_true)) + 2) return np.sum(gains / discounts) def ndcg_score(y_true, y_score, k=10): best", "numpy.linalg import cholesky import numpy as np def senti2cate(x): if x<=-0.6: return 0", "positive_score.reshape((num_positive,1)) positive_score = np.repeat(positive_score,num_negative,axis=1) false_score = false_score.reshape((1,num_negative)) false_score = np.repeat(false_score,num_positive,axis=0) return 1-((positive_score<false_score).mean()+0.5*(positive_score==false_score).mean()) def", "[float(x) for x in data[1:]] if word in word_dict: emb_dict[word] = vec emb_table", "x<0.6: return 3 elif x>=0.6: return 4 def dcg_score(y_true, y_score, k=10): order 
=", "+ 2) return np.sum(gains / discounts) def ndcg_score(y_true, y_score, k=10): best = dcg_score(y_true,", "== int: emb_table[i] = np.reshape(norm, 300) emb_table[0] = np.random.uniform(-0.03,0.03,size=(300,)) emb_table = np.array(emb_table,dtype='float32') return", "word = data[0].decode() if len(word) != 0: vec = [float(x) for x in", "[] for i in emb_dict: emb_table[word_dict[i][0]] = np.array(emb_dict[i],dtype='float32') all_emb.append(emb_table[word_dict[i][0]]) all_emb = np.array(all_emb,dtype='float32') mu", "with open(embfile,'rb')as f: while True: line = f.readline() if len(line) == 0: break", "y_score, k=10): best = dcg_score(y_true, y_true, k) actual = dcg_score(y_true, y_score, k) return", "discounts = np.log2(np.arange(len(y_true)) + 2) return np.sum(gains / discounts) def ndcg_score(y_true, y_score, k=10):", "len(word) != 0: vec = [float(x) for x in data[1:]] if word in", "num_positive = (label==1).sum() num_negative = (label==0).sum() positive_score = positive_score.reshape((num_positive,1)) positive_score = np.repeat(positive_score,num_negative,axis=1) false_score", "= np.repeat(positive_score,num_negative,axis=1) false_score = false_score.reshape((1,num_negative)) false_score = np.repeat(false_score,num_positive,axis=0) return 1-((positive_score<false_score).mean()+0.5*(positive_score==false_score).mean()) def embedding(embfile,word_dict): emb_dict", "data[0].decode() if len(word) != 0: vec = [float(x) for x in data[1:]] if", "Sigma = np.cov(all_emb.T) norm = np.random.multivariate_normal(mu, Sigma, 1) for i in range(len(emb_table)): if", "/ best def mrr_score(y_true, y_score): order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order) rr_score", "y_true - 1 discounts = np.log2(np.arange(len(y_true)) + 2) return np.sum(gains / discounts) def", "= [0]*len(word_dict) dummy = np.zeros(300,dtype='float32') all_emb = [] for i in emb_dict: emb_table[word_dict[i][0]]", "np.sum(gains / discounts) def ndcg_score(y_true, y_score, k=10): best = dcg_score(y_true, y_true, k) actual", "while True: line = f.readline() if len(line) == 0: break data = line.split()", "2 elif x>=0.2 and x<0.6: return 3 elif x>=0.6: return 4 def dcg_score(y_true,", "(np.arange(len(y_true)) + 1) return np.sum(rr_score) / np.sum(y_true) def auc(label,score): label=np.array(label) score=np.array(score) false_score =", "= f.readline() if len(line) == 0: break data = line.split() word = data[0].decode()", "senti2cate(x): if x<=-0.6: return 0 elif x>-0.6 and x<=-0.2: return 1 elif x>-0.2", "np.repeat(positive_score,num_negative,axis=1) false_score = false_score.reshape((1,num_negative)) false_score = np.repeat(false_score,num_positive,axis=0) return 1-((positive_score<false_score).mean()+0.5*(positive_score==false_score).mean()) def embedding(embfile,word_dict): emb_dict =", "def dcg_score(y_true, y_score, k=10): order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) gains =", "elif x>-0.2 and x<0.2: return 2 elif x>=0.2 and x<0.6: return 3 elif", "= line.split() word = data[0].decode() if len(word) != 0: vec = [float(x) for", "range(len(emb_table)): if type(emb_table[i]) == int: emb_table[i] = np.reshape(norm, 300) emb_table[0] = np.random.uniform(-0.03,0.03,size=(300,)) emb_table", "false_score = np.repeat(false_score,num_positive,axis=0) return 1-((positive_score<false_score).mean()+0.5*(positive_score==false_score).mean()) def embedding(embfile,word_dict): emb_dict = {} with open(embfile,'rb')as f:", "in emb_dict: emb_table[word_dict[i][0]] = np.array(emb_dict[i],dtype='float32') 
all_emb.append(emb_table[word_dict[i][0]]) all_emb = np.array(all_emb,dtype='float32') mu = np.mean(all_emb, axis=0)", "type(emb_table[i]) == int: emb_table[i] = np.reshape(norm, 300) emb_table[0] = np.random.uniform(-0.03,0.03,size=(300,)) emb_table = np.array(emb_table,dtype='float32')", "= positive_score.reshape((num_positive,1)) positive_score = np.repeat(positive_score,num_negative,axis=1) false_score = false_score.reshape((1,num_negative)) false_score = np.repeat(false_score,num_positive,axis=0) return 1-((positive_score<false_score).mean()+0.5*(positive_score==false_score).mean())", "= score[label==1] num_positive = (label==1).sum() num_negative = (label==0).sum() positive_score = positive_score.reshape((num_positive,1)) positive_score =", "int: emb_table[i] = np.reshape(norm, 300) emb_table[0] = np.random.uniform(-0.03,0.03,size=(300,)) emb_table = np.array(emb_table,dtype='float32') return emb_table", "x>-0.6 and x<=-0.2: return 1 elif x>-0.2 and x<0.2: return 2 elif x>=0.2", "order) rr_score = y_true / (np.arange(len(y_true)) + 1) return np.sum(rr_score) / np.sum(y_true) def", "return np.sum(gains / discounts) def ndcg_score(y_true, y_score, k=10): best = dcg_score(y_true, y_true, k)", "def ndcg_score(y_true, y_score, k=10): best = dcg_score(y_true, y_true, k) actual = dcg_score(y_true, y_score,", "vec emb_table = [0]*len(word_dict) dummy = np.zeros(300,dtype='float32') all_emb = [] for i in", "= np.take(y_true, order[:k]) gains = 2 ** y_true - 1 discounts = np.log2(np.arange(len(y_true))", "return 0 elif x>-0.6 and x<=-0.2: return 1 elif x>-0.2 and x<0.2: return", "= np.zeros(300,dtype='float32') all_emb = [] for i in emb_dict: emb_table[word_dict[i][0]] = np.array(emb_dict[i],dtype='float32') all_emb.append(emb_table[word_dict[i][0]])", "y_score): order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order) rr_score = y_true / (np.arange(len(y_true))", "cholesky import numpy as np def senti2cate(x): if x<=-0.6: return 0 elif x>-0.6", "= np.mean(all_emb, axis=0) Sigma = np.cov(all_emb.T) norm = np.random.multivariate_normal(mu, Sigma, 1) for i", "= np.random.multivariate_normal(mu, Sigma, 1) for i in range(len(emb_table)): if type(emb_table[i]) == int: emb_table[i]", "k=10): best = dcg_score(y_true, y_true, k) actual = dcg_score(y_true, y_score, k) return actual", "np def senti2cate(x): if x<=-0.6: return 0 elif x>-0.6 and x<=-0.2: return 1", "4 def dcg_score(y_true, y_score, k=10): order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) gains", "y_true / (np.arange(len(y_true)) + 1) return np.sum(rr_score) / np.sum(y_true) def auc(label,score): label=np.array(label) score=np.array(score)", "k=10): order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) gains = 2 ** y_true", "- 1 discounts = np.log2(np.arange(len(y_true)) + 2) return np.sum(gains / discounts) def ndcg_score(y_true,", "/ discounts) def ndcg_score(y_true, y_score, k=10): best = dcg_score(y_true, y_true, k) actual =", "= dcg_score(y_true, y_true, k) actual = dcg_score(y_true, y_score, k) return actual / best", "= np.array(emb_dict[i],dtype='float32') all_emb.append(emb_table[word_dict[i][0]]) all_emb = np.array(all_emb,dtype='float32') mu = np.mean(all_emb, axis=0) Sigma = np.cov(all_emb.T)", "x<=-0.6: return 0 elif x>-0.6 and x<=-0.2: return 1 elif x>-0.2 and x<0.2:", "order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order) rr_score = y_true / (np.arange(len(y_true)) +", "in range(len(emb_table)): if type(emb_table[i]) == int: emb_table[i] = np.reshape(norm, 300) 
emb_table[0] = np.random.uniform(-0.03,0.03,size=(300,))", "= np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) gains = 2 ** y_true - 1", "= vec emb_table = [0]*len(word_dict) dummy = np.zeros(300,dtype='float32') all_emb = [] for i", "dummy = np.zeros(300,dtype='float32') all_emb = [] for i in emb_dict: emb_table[word_dict[i][0]] = np.array(emb_dict[i],dtype='float32')", "len(line) == 0: break data = line.split() word = data[0].decode() if len(word) !=", "return actual / best def mrr_score(y_true, y_score): order = np.argsort(y_score)[::-1] y_true = np.take(y_true,", "num_negative = (label==0).sum() positive_score = positive_score.reshape((num_positive,1)) positive_score = np.repeat(positive_score,num_negative,axis=1) false_score = false_score.reshape((1,num_negative)) false_score", "np.argsort(y_score)[::-1] y_true = np.take(y_true, order) rr_score = y_true / (np.arange(len(y_true)) + 1) return", "2) return np.sum(gains / discounts) def ndcg_score(y_true, y_score, k=10): best = dcg_score(y_true, y_true,", "!= 0: vec = [float(x) for x in data[1:]] if word in word_dict:", "auc(label,score): label=np.array(label) score=np.array(score) false_score = score[label==0] positive_score = score[label==1] num_positive = (label==1).sum() num_negative", "discounts) def ndcg_score(y_true, y_score, k=10): best = dcg_score(y_true, y_true, k) actual = dcg_score(y_true,", "y_true = np.take(y_true, order[:k]) gains = 2 ** y_true - 1 discounts =", "emb_dict = {} with open(embfile,'rb')as f: while True: line = f.readline() if len(line)", "label=np.array(label) score=np.array(score) false_score = score[label==0] positive_score = score[label==1] num_positive = (label==1).sum() num_negative =", "data = line.split() word = data[0].decode() if len(word) != 0: vec = [float(x)", "x in data[1:]] if word in word_dict: emb_dict[word] = vec emb_table = [0]*len(word_dict)", "all_emb = [] for i in emb_dict: emb_table[word_dict[i][0]] = np.array(emb_dict[i],dtype='float32') all_emb.append(emb_table[word_dict[i][0]]) all_emb =", "x<=-0.2: return 1 elif x>-0.2 and x<0.2: return 2 elif x>=0.2 and x<0.6:", "def mrr_score(y_true, y_score): order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order) rr_score = y_true", "mrr_score(y_true, y_score): order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order) rr_score = y_true /", "score[label==0] positive_score = score[label==1] num_positive = (label==1).sum() num_negative = (label==0).sum() positive_score = positive_score.reshape((num_positive,1))", "np.sum(rr_score) / np.sum(y_true) def auc(label,score): label=np.array(label) score=np.array(score) false_score = score[label==0] positive_score = score[label==1]", "np.mean(all_emb, axis=0) Sigma = np.cov(all_emb.T) norm = np.random.multivariate_normal(mu, Sigma, 1) for i in", "and x<0.6: return 3 elif x>=0.6: return 4 def dcg_score(y_true, y_score, k=10): order", "x>-0.2 and x<0.2: return 2 elif x>=0.2 and x<0.6: return 3 elif x>=0.6:", "return 1-((positive_score<false_score).mean()+0.5*(positive_score==false_score).mean()) def embedding(embfile,word_dict): emb_dict = {} with open(embfile,'rb')as f: while True: line", "np.array(all_emb,dtype='float32') mu = np.mean(all_emb, axis=0) Sigma = np.cov(all_emb.T) norm = np.random.multivariate_normal(mu, Sigma, 1)", "positive_score = np.repeat(positive_score,num_negative,axis=1) false_score = false_score.reshape((1,num_negative)) false_score = np.repeat(false_score,num_positive,axis=0) return 
1-((positive_score<false_score).mean()+0.5*(positive_score==false_score).mean()) def embedding(embfile,word_dict):", "gains = 2 ** y_true - 1 discounts = np.log2(np.arange(len(y_true)) + 2) return", "and x<=-0.2: return 1 elif x>-0.2 and x<0.2: return 2 elif x>=0.2 and", "1 discounts = np.log2(np.arange(len(y_true)) + 2) return np.sum(gains / discounts) def ndcg_score(y_true, y_score,", "def senti2cate(x): if x<=-0.6: return 0 elif x>-0.6 and x<=-0.2: return 1 elif", "numpy as np def senti2cate(x): if x<=-0.6: return 0 elif x>-0.6 and x<=-0.2:", "f: while True: line = f.readline() if len(line) == 0: break data =", "0: break data = line.split() word = data[0].decode() if len(word) != 0: vec", "best = dcg_score(y_true, y_true, k) actual = dcg_score(y_true, y_score, k) return actual /", "= [float(x) for x in data[1:]] if word in word_dict: emb_dict[word] = vec", "dcg_score(y_true, y_score, k) return actual / best def mrr_score(y_true, y_score): order = np.argsort(y_score)[::-1]", "from numpy.linalg import cholesky import numpy as np def senti2cate(x): if x<=-0.6: return", "import numpy as np def senti2cate(x): if x<=-0.6: return 0 elif x>-0.6 and", "for x in data[1:]] if word in word_dict: emb_dict[word] = vec emb_table =", "1 elif x>-0.2 and x<0.2: return 2 elif x>=0.2 and x<0.6: return 3", "in data[1:]] if word in word_dict: emb_dict[word] = vec emb_table = [0]*len(word_dict) dummy", "elif x>=0.6: return 4 def dcg_score(y_true, y_score, k=10): order = np.argsort(y_score)[::-1] y_true =", "positive_score = positive_score.reshape((num_positive,1)) positive_score = np.repeat(positive_score,num_negative,axis=1) false_score = false_score.reshape((1,num_negative)) false_score = np.repeat(false_score,num_positive,axis=0) return", "= np.cov(all_emb.T) norm = np.random.multivariate_normal(mu, Sigma, 1) for i in range(len(emb_table)): if type(emb_table[i])", "f.readline() if len(line) == 0: break data = line.split() word = data[0].decode() if", "score[label==1] num_positive = (label==1).sum() num_negative = (label==0).sum() positive_score = positive_score.reshape((num_positive,1)) positive_score = np.repeat(positive_score,num_negative,axis=1)", "best def mrr_score(y_true, y_score): order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order) rr_score =", "= data[0].decode() if len(word) != 0: vec = [float(x) for x in data[1:]]", "= np.repeat(false_score,num_positive,axis=0) return 1-((positive_score<false_score).mean()+0.5*(positive_score==false_score).mean()) def embedding(embfile,word_dict): emb_dict = {} with open(embfile,'rb')as f: while", "[0]*len(word_dict) dummy = np.zeros(300,dtype='float32') all_emb = [] for i in emb_dict: emb_table[word_dict[i][0]] =", "break data = line.split() word = data[0].decode() if len(word) != 0: vec =", "np.zeros(300,dtype='float32') all_emb = [] for i in emb_dict: emb_table[word_dict[i][0]] = np.array(emb_dict[i],dtype='float32') all_emb.append(emb_table[word_dict[i][0]]) all_emb", "k) actual = dcg_score(y_true, y_score, k) return actual / best def mrr_score(y_true, y_score):", "1) return np.sum(rr_score) / np.sum(y_true) def auc(label,score): label=np.array(label) score=np.array(score) false_score = score[label==0] positive_score", "def embedding(embfile,word_dict): emb_dict = {} with open(embfile,'rb')as f: while True: line = f.readline()", "all_emb = np.array(all_emb,dtype='float32') mu = np.mean(all_emb, axis=0) Sigma = np.cov(all_emb.T) norm = np.random.multivariate_normal(mu,", "y_true = np.take(y_true, order) rr_score = y_true / (np.arange(len(y_true)) 
+ 1) return np.sum(rr_score)", "data[1:]] if word in word_dict: emb_dict[word] = vec emb_table = [0]*len(word_dict) dummy =", "return 3 elif x>=0.6: return 4 def dcg_score(y_true, y_score, k=10): order = np.argsort(y_score)[::-1]", "return 4 def dcg_score(y_true, y_score, k=10): order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k])", "k) return actual / best def mrr_score(y_true, y_score): order = np.argsort(y_score)[::-1] y_true =", "return np.sum(rr_score) / np.sum(y_true) def auc(label,score): label=np.array(label) score=np.array(score) false_score = score[label==0] positive_score =", "np.sum(y_true) def auc(label,score): label=np.array(label) score=np.array(score) false_score = score[label==0] positive_score = score[label==1] num_positive =", "return 1 elif x>-0.2 and x<0.2: return 2 elif x>=0.2 and x<0.6: return", "positive_score = score[label==1] num_positive = (label==1).sum() num_negative = (label==0).sum() positive_score = positive_score.reshape((num_positive,1)) positive_score", "= y_true / (np.arange(len(y_true)) + 1) return np.sum(rr_score) / np.sum(y_true) def auc(label,score): label=np.array(label)", "x>=0.6: return 4 def dcg_score(y_true, y_score, k=10): order = np.argsort(y_score)[::-1] y_true = np.take(y_true,", "{} with open(embfile,'rb')as f: while True: line = f.readline() if len(line) == 0:", "y_score, k=10): order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) gains = 2 **", "/ np.sum(y_true) def auc(label,score): label=np.array(label) score=np.array(score) false_score = score[label==0] positive_score = score[label==1] num_positive", "if len(word) != 0: vec = [float(x) for x in data[1:]] if word", "2 ** y_true - 1 discounts = np.log2(np.arange(len(y_true)) + 2) return np.sum(gains /", "vec = [float(x) for x in data[1:]] if word in word_dict: emb_dict[word] =", "Sigma, 1) for i in range(len(emb_table)): if type(emb_table[i]) == int: emb_table[i] = np.reshape(norm,", "True: line = f.readline() if len(line) == 0: break data = line.split() word", "emb_table = [0]*len(word_dict) dummy = np.zeros(300,dtype='float32') all_emb = [] for i in emb_dict:", "as np def senti2cate(x): if x<=-0.6: return 0 elif x>-0.6 and x<=-0.2: return", "np.repeat(false_score,num_positive,axis=0) return 1-((positive_score<false_score).mean()+0.5*(positive_score==false_score).mean()) def embedding(embfile,word_dict): emb_dict = {} with open(embfile,'rb')as f: while True:", "actual = dcg_score(y_true, y_score, k) return actual / best def mrr_score(y_true, y_score): order", "= 2 ** y_true - 1 discounts = np.log2(np.arange(len(y_true)) + 2) return np.sum(gains", "word_dict: emb_dict[word] = vec emb_table = [0]*len(word_dict) dummy = np.zeros(300,dtype='float32') all_emb = []", "i in range(len(emb_table)): if type(emb_table[i]) == int: emb_table[i] = np.reshape(norm, 300) emb_table[0] =", "order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) gains = 2 ** y_true -", "false_score = score[label==0] positive_score = score[label==1] num_positive = (label==1).sum() num_negative = (label==0).sum() positive_score", "false_score = false_score.reshape((1,num_negative)) false_score = np.repeat(false_score,num_positive,axis=0) return 1-((positive_score<false_score).mean()+0.5*(positive_score==false_score).mean()) def embedding(embfile,word_dict): emb_dict = {}", "== 0: break data = line.split() word = data[0].decode() if len(word) != 0:", "if type(emb_table[i]) == int: emb_table[i] = np.reshape(norm, 300) emb_table[0] = np.random.uniform(-0.03,0.03,size=(300,)) 
emb_table =", "for i in range(len(emb_table)): if type(emb_table[i]) == int: emb_table[i] = np.reshape(norm, 300) emb_table[0]", "(label==0).sum() positive_score = positive_score.reshape((num_positive,1)) positive_score = np.repeat(positive_score,num_negative,axis=1) false_score = false_score.reshape((1,num_negative)) false_score = np.repeat(false_score,num_positive,axis=0)", "0 elif x>-0.6 and x<=-0.2: return 1 elif x>-0.2 and x<0.2: return 2", "= np.argsort(y_score)[::-1] y_true = np.take(y_true, order) rr_score = y_true / (np.arange(len(y_true)) + 1)", "x>=0.2 and x<0.6: return 3 elif x>=0.6: return 4 def dcg_score(y_true, y_score, k=10):", "dcg_score(y_true, y_true, k) actual = dcg_score(y_true, y_score, k) return actual / best def", "np.log2(np.arange(len(y_true)) + 2) return np.sum(gains / discounts) def ndcg_score(y_true, y_score, k=10): best =", "line = f.readline() if len(line) == 0: break data = line.split() word =", "norm = np.random.multivariate_normal(mu, Sigma, 1) for i in range(len(emb_table)): if type(emb_table[i]) == int:", "np.take(y_true, order[:k]) gains = 2 ** y_true - 1 discounts = np.log2(np.arange(len(y_true)) +", "order[:k]) gains = 2 ** y_true - 1 discounts = np.log2(np.arange(len(y_true)) + 2)", "line.split() word = data[0].decode() if len(word) != 0: vec = [float(x) for x", "= {} with open(embfile,'rb')as f: while True: line = f.readline() if len(line) ==", "(label==1).sum() num_negative = (label==0).sum() positive_score = positive_score.reshape((num_positive,1)) positive_score = np.repeat(positive_score,num_negative,axis=1) false_score = false_score.reshape((1,num_negative))", "np.cov(all_emb.T) norm = np.random.multivariate_normal(mu, Sigma, 1) for i in range(len(emb_table)): if type(emb_table[i]) ==", "elif x>=0.2 and x<0.6: return 3 elif x>=0.6: return 4 def dcg_score(y_true, y_score,", "false_score.reshape((1,num_negative)) false_score = np.repeat(false_score,num_positive,axis=0) return 1-((positive_score<false_score).mean()+0.5*(positive_score==false_score).mean()) def embedding(embfile,word_dict): emb_dict = {} with open(embfile,'rb')as", "0: vec = [float(x) for x in data[1:]] if word in word_dict: emb_dict[word]", "= (label==1).sum() num_negative = (label==0).sum() positive_score = positive_score.reshape((num_positive,1)) positive_score = np.repeat(positive_score,num_negative,axis=1) false_score =", "1) for i in range(len(emb_table)): if type(emb_table[i]) == int: emb_table[i] = np.reshape(norm, 300)", "y_score, k) return actual / best def mrr_score(y_true, y_score): order = np.argsort(y_score)[::-1] y_true", "ndcg_score(y_true, y_score, k=10): best = dcg_score(y_true, y_true, k) actual = dcg_score(y_true, y_score, k)", "= np.array(all_emb,dtype='float32') mu = np.mean(all_emb, axis=0) Sigma = np.cov(all_emb.T) norm = np.random.multivariate_normal(mu, Sigma,", "if x<=-0.6: return 0 elif x>-0.6 and x<=-0.2: return 1 elif x>-0.2 and", "/ (np.arange(len(y_true)) + 1) return np.sum(rr_score) / np.sum(y_true) def auc(label,score): label=np.array(label) score=np.array(score) false_score", "emb_dict[word] = vec emb_table = [0]*len(word_dict) dummy = np.zeros(300,dtype='float32') all_emb = [] for", "+ 1) return np.sum(rr_score) / np.sum(y_true) def auc(label,score): label=np.array(label) score=np.array(score) false_score = score[label==0]", "= [] for i in emb_dict: emb_table[word_dict[i][0]] = np.array(emb_dict[i],dtype='float32') all_emb.append(emb_table[word_dict[i][0]]) all_emb = np.array(all_emb,dtype='float32')", "actual / best def mrr_score(y_true, 
y_score): order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order)", "if len(line) == 0: break data = line.split() word = data[0].decode() if len(word)", "= false_score.reshape((1,num_negative)) false_score = np.repeat(false_score,num_positive,axis=0) return 1-((positive_score<false_score).mean()+0.5*(positive_score==false_score).mean()) def embedding(embfile,word_dict): emb_dict = {} with", "embedding(embfile,word_dict): emb_dict = {} with open(embfile,'rb')as f: while True: line = f.readline() if", "return 2 elif x>=0.2 and x<0.6: return 3 elif x>=0.6: return 4 def", "y_true, k) actual = dcg_score(y_true, y_score, k) return actual / best def mrr_score(y_true,", "elif x>-0.6 and x<=-0.2: return 1 elif x>-0.2 and x<0.2: return 2 elif", "1-((positive_score<false_score).mean()+0.5*(positive_score==false_score).mean()) def embedding(embfile,word_dict): emb_dict = {} with open(embfile,'rb')as f: while True: line =", "= (label==0).sum() positive_score = positive_score.reshape((num_positive,1)) positive_score = np.repeat(positive_score,num_negative,axis=1) false_score = false_score.reshape((1,num_negative)) false_score =", "np.random.multivariate_normal(mu, Sigma, 1) for i in range(len(emb_table)): if type(emb_table[i]) == int: emb_table[i] =", "= score[label==0] positive_score = score[label==1] num_positive = (label==1).sum() num_negative = (label==0).sum() positive_score =", "= np.take(y_true, order) rr_score = y_true / (np.arange(len(y_true)) + 1) return np.sum(rr_score) /", "np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) gains = 2 ** y_true - 1 discounts", "word in word_dict: emb_dict[word] = vec emb_table = [0]*len(word_dict) dummy = np.zeros(300,dtype='float32') all_emb" ]
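This row's fragments slice a small NumPy utility module: a sentiment-score bucketiser (senti2cate), ranking metrics (dcg_score, ndcg_score, mrr_score and a pairwise auc), and an embedding-table loader built around word_dict and a 300-dimensional vector file. A reassembly of the self-contained parts is sketched below; the embedding loader is omitted because it depends on that external file and on a word_dict layout the fragments only hint at, and the joins between fragments are inferred.

# Sketch reassembled from the n-gram fragments above (not part of the dataset row).
import numpy as np


def senti2cate(x):
    # bucket a sentiment score in [-1, 1] into 5 ordinal classes
    if x <= -0.6:
        return 0
    elif x > -0.6 and x <= -0.2:
        return 1
    elif x > -0.2 and x < 0.2:
        return 2
    elif x >= 0.2 and x < 0.6:
        return 3
    elif x >= 0.6:
        return 4


def dcg_score(y_true, y_score, k=10):
    # discounted cumulative gain of the top-k items ranked by y_score
    order = np.argsort(y_score)[::-1]
    y_true = np.take(y_true, order[:k])
    gains = 2 ** y_true - 1
    discounts = np.log2(np.arange(len(y_true)) + 2)
    return np.sum(gains / discounts)


def ndcg_score(y_true, y_score, k=10):
    # DCG normalised by the DCG of the ideal ordering
    best = dcg_score(y_true, y_true, k)
    actual = dcg_score(y_true, y_score, k)
    return actual / best


def mrr_score(y_true, y_score):
    # mean reciprocal rank over the positive labels
    order = np.argsort(y_score)[::-1]
    y_true = np.take(y_true, order)
    rr_score = y_true / (np.arange(len(y_true)) + 1)
    return np.sum(rr_score) / np.sum(y_true)


def auc(label, score):
    # pairwise AUC: P(pos > neg) + 0.5 * P(pos == neg)
    label = np.array(label)
    score = np.array(score)
    false_score = score[label == 0]
    positive_score = score[label == 1]
    num_positive = (label == 1).sum()
    num_negative = (label == 0).sum()
    positive_score = positive_score.reshape((num_positive, 1))
    positive_score = np.repeat(positive_score, num_negative, axis=1)
    false_score = false_score.reshape((1, num_negative))
    false_score = np.repeat(false_score, num_positive, axis=0)
    return 1 - ((positive_score < false_score).mean()
                + 0.5 * (positive_score == false_score).mean())


if __name__ == "__main__":
    # tiny smoke test with made-up labels/scores (not taken from the dataset)
    y_true = np.array([1, 0, 0, 1])
    y_score = np.array([0.9, 0.2, 0.4, 0.7])
    print(ndcg_score(y_true, y_score, k=4), mrr_score(y_true, y_score), auc(y_true, y_score))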
[ "a single matching version is present in released versions \"\"\" matching_versions = [\"1.5.0-1.4.el7\"]", "properly_sorted_pkgs_obj = TestPackage.create_test_packages(properly_sorted_pkgs) sorted_test_pkgs_obj = sort_pkgs(test_pkgs_obj) self.assertEqual(sorted_test_pkgs_obj, properly_sorted_pkgs_obj) def test_sort_packages_with_different_minor_version(self): \"\"\" when sorting", "\"3.6\") def test_non_origin_package_with_old_schema(self): \"\"\" when origin package is in 3.5 version \"\"\" self.assertEqual(schema_change_check(\"openshift-ansible\",", "test_origin_package_with_old_schema(self): \"\"\" when origin package is in 1.5 version \"\"\" self.assertEqual(schema_change_check(\"origin\", \"3\", \"5\"),", "only single matching pre-release version is present in pre-released versions \"\"\" matching_versions =", "matching_versions = [\"1.2.0-0.el7\", \"1.2.2-0.el7\", \"1.2.5-0.el7\"] install_version = \"1.2.5-0.el7\" self.assertEqual(get_last_version(matching_versions), install_version) def test_with_single_matching_pre_release_version(self): \"\"\"", "origin-3.6.0-0.0.alpha.0.1 package \"\"\" test_pkgs = [\"origin-3.6.0-0.0.alpha.0.1.el7\", \"origin-3.6.0-0.alpha.0.2.el7\"] properly_sorted_pkgs = [\"origin-3.6.0-0.alpha.0.2.el7\"] test_pkgs_obj = TestPackage.create_test_packages(test_pkgs)", "package \"\"\" test_pkgs = [\"origin-3.6.0-0.0.alpha.0.1.el7\", \"origin-3.6.0-0.alpha.0.2.el7\"] properly_sorted_pkgs = [\"origin-3.6.0-0.alpha.0.2.el7\"] test_pkgs_obj = TestPackage.create_test_packages(test_pkgs) properly_sorted_pkgs_obj", "there is no duplicate package, return the single one \"\"\" test_pkgs = [\"origin-1.4.1-1.el7.x86_64\",", "= [\"origin-1.5.1-1.el7\", \"origin-1.4.0-1.el7\"] properly_sorted_pkgs = [\"origin-1.4.0-1.el7\", \"origin-1.5.1-1.el7\"] test_pkgs_obj = TestPackage.create_test_packages(test_pkgs) properly_sorted_pkgs_obj = TestPackage.create_test_packages(properly_sorted_pkgs)", "version is the first from the new origin versioning schema - origin-3.6 \"\"\"", "one pre-release version exist, it is returned \"\"\" test_pkgs = [\"origin-1.5.0-0.4.el7.x86_64\"] test_pkgs_objs =", "origin-3.6 \"\"\" self.assertEqual(determine_search_versions(\"origin\", \"3.6.0\"), (\"1.5\", \"3.6\")) def test_origin_with_short_standard_to_legacy_versioning_schema(self): \"\"\" when the origin version", "3.4 version \"\"\" self.assertEqual(determine_search_versions(\"openshift-ansible\", \"3.5.0\"), (\"3.4\", \"3.5\")) class SchemaChangeCheckTestCase(unittest.TestCase): \"Test for `determine_install_upgrade_version.py`\" def", "versioning schema is in 3.4 version \"\"\" self.assertEqual(determine_search_versions(\"openshift-ansible\", \"3.5.0\"), (\"3.4\", \"3.5\")) class SchemaChangeCheckTestCase(unittest.TestCase):", "\"\"\" test_pkgs = [\"origin-1.4.1-1.el7.x86_64\", \"origin-1.5.0-0.4.el7.x86_64\"] test_pkgs_objs = TestPackage.create_test_packages(test_pkgs) self.assertEqual(get_matching_versions('origin', test_pkgs_objs, '1.5'), ['1.5.0-0.4.el7']) def", "= TestPackage.create_test_packages(test_pkgs) properly_sorted_pkgs_obj = TestPackage.create_test_packages(properly_sorted_pkgs) sorted_test_pkgs_obj = sort_pkgs(test_pkgs_obj) self.assertEqual(sorted_test_pkgs_obj, properly_sorted_pkgs_obj) if __name__ ==", "when only one matching version exist and its pre-release, it is returned \"\"\"", "1.5 version \"\"\" self.assertEqual(schema_change_check(\"origin\", \"3\", \"5\"), \"1.5\") def test_non_origin_package_with_new_schema(self): \"\"\" when origin package", "test_pkgs_objs, 
'1.5'), ['1.5.0-0.4.el7', '1.5.2-0.1.el7']) def test_with_single_release(self): \"\"\" when both release and pre-release versions", "(\"3.6\", \"3.7\")) def test_origin_with_short_standard_versioning_schema(self): \"\"\" when the origin version is in short format", "rpmutils.splitFilename(pkg) pkg_vra = pkg_version + \"-\" + pkg_release + \".\" + pkg_arch pkg_tup", "higher then the first version of the new origin versioning schema - origin-3.6", "test_with_single_matching_release_version(self): \"\"\" when only a single matching version is present in released versions", "\"1.5.2-1.1.el7\"]) def test_with_no_matches(self): test_pkgs = [\"origin-1.2.0-0.4.el7.x86_64\", \"origin-1.3.0-1.1.el7.x86_64\", \"origin-1.4.2-1.1.el7.x86_64\"] test_pkgs_objs = TestPackage.create_test_packages(test_pkgs) self.assertRaises(SystemExit, get_matching_versions,", "\"3.6\")) def test_origin_with_short_standard_to_legacy_versioning_schema(self): \"\"\" when the origin version is in short format and", "\"3.5\") class GetLastVersionTestCase(unittest.TestCase): \"Test for `determine_install_upgrade_version.py`\" def test_with_multiple_matching_release_versions(self): \"\"\" when multiple matching version", "it is returned \"\"\" test_pkgs = [\"origin-1.5.0-0.4.el7.x86_64\"] test_pkgs_objs = TestPackage.create_test_packages(test_pkgs) self.assertEqual(get_matching_versions('origin', test_pkgs_objs, '1.5'),", "\"\"\" when the origin version is higher then the first version of the", "openshift-ansible, which doesnt have different versioning schema is in 3.6 version \"\"\" self.assertEqual(determine_search_versions(\"openshift-ansible\",", "def test_sort_packages_with_same_minor_version(self): \"\"\" when sorting origin packages within the same minor version \"\"\"", "(\"3.5\", \"3.6\")) def test_openshift_ansible_with_legacy_versioning_schema(self): \"\"\" when openshift-ansible, which doesnt have different versioning schema", "= [\"origin-1.4.0-1.el7\", \"origin-1.5.1-1.el7\"] test_pkgs_obj = TestPackage.create_test_packages(test_pkgs) properly_sorted_pkgs_obj = TestPackage.create_test_packages(properly_sorted_pkgs) sorted_test_pkgs_obj = sort_pkgs(test_pkgs_obj) self.assertEqual(sorted_test_pkgs_obj,", "the origin version is in short and old versioning schema \"\"\" self.assertEqual(determine_search_versions(\"origin\", \"1.5\"),", "test_pkgs_objs = TestPackage.create_test_packages(test_pkgs) self.assertEqual(get_matching_versions('origin', test_pkgs_objs, '1.5'), ['1.5.0-0.4.el7']) def test_with_single_pre_release(self): \"\"\" when only one", "def test_with_multiple_pre_release(self): \"\"\" when only one pre-release version exist, it is returned \"\"\"", "different versioning schema is in 3.4 version \"\"\" self.assertEqual(determine_search_versions(\"openshift-ansible\", \"3.5.0\"), (\"3.4\", \"3.5\")) class", "= (pkg_name , pkg_arch, pkg_epoch, pkg_version, pkg_release) test_pkgs_objs.append(TestPackage(pkg_name, pkg_version, pkg_release, pkg_epoch, pkg_vra, pkg_tup))", "in pre-released versions \"\"\" matching_versions = [\"1.2.0-0.el7\", \"1.2.2-0.el7\", \"1.2.5-0.el7\"] install_version = \"1.2.5-0.el7\" self.assertEqual(get_last_version(matching_versions),", "[\"origin-1.4.1-1.el7.x86_64\", \"origin-1.5.0-0.4.el7.x86_64\"] test_pkgs_objs = TestPackage.create_test_packages(test_pkgs) result_pkgs_objs = test_pkgs_objs[:2] self.assertEqual(remove_duplicate_pkgs(test_pkgs_objs), result_pkgs_objs) class GetMatchingVersionTestCase(unittest.TestCase): \"Test", "+ pkg_release + \".\" + pkg_arch pkg_tup = 
(pkg_name , pkg_arch, pkg_epoch, pkg_version,", "\"3.7\"), (\"3.6\", \"3.7\")) def test_origin_with_standard_to_legacy_versioning_schema(self): \"\"\" when the origin version is the first", "self.assertEqual(get_matching_versions('origin', test_pkgs_objs, '1.5'), [\"1.5.0-1.1.el7\", \"1.5.2-1.1.el7\"]) def test_with_no_matches(self): test_pkgs = [\"origin-1.2.0-0.4.el7.x86_64\", \"origin-1.3.0-1.1.el7.x86_64\", \"origin-1.4.2-1.1.el7.x86_64\"] test_pkgs_objs", "+ \"-\" + pkg_release + \".\" + pkg_arch pkg_tup = (pkg_name , pkg_arch,", "origin packages with different minor version \"\"\" test_pkgs = [\"origin-1.5.1-1.el7\", \"origin-1.4.0-1.el7\"] properly_sorted_pkgs =", "test_pkgs_objs[:2] self.assertEqual(remove_duplicate_pkgs(test_pkgs_objs), result_pkgs_objs) def test_removing_no_duplicate_package(self): \"\"\" when there is no duplicate package, return", "old versioning schema \"\"\" self.assertEqual(determine_search_versions(\"origin\", \"1.5.0\"), (\"1.4\", \"1.5\")) def test_origin_with_short_legacy_schema(self): \"\"\" when the", "doesnt have different versioning schema is in 3.6 version \"\"\" self.assertEqual(determine_search_versions(\"openshift-ansible\", \"3.6.0\"), (\"3.5\",", "when only a single matching version is present in released versions \"\"\" matching_versions", "\"origin-1.5.1-1.el7\"] test_pkgs_obj = TestPackage.create_test_packages(test_pkgs) properly_sorted_pkgs_obj = TestPackage.create_test_packages(properly_sorted_pkgs) sorted_test_pkgs_obj = sort_pkgs(test_pkgs_obj) self.assertEqual(sorted_test_pkgs_obj, properly_sorted_pkgs_obj) def", "self.assertEqual(determine_search_versions(\"origin\", \"3.7\"), (\"3.6\", \"3.7\")) def test_origin_with_standard_to_legacy_versioning_schema(self): \"\"\" when the origin version is the", "test_openshift_ansible_with_standard_to_legacy_versioning_schema(self): \"\"\" when openshift-ansible, which doesnt have different versioning schema is in 3.6", "origin-3.6 \"\"\" self.assertEqual(determine_search_versions(\"origin\", \"3.6\"), (\"1.5\", \"3.6\")) def test_origin_with_legacy_schema(self): \"\"\" when the origin version", "version exist, it is returned \"\"\" test_pkgs = [\"origin-1.5.0-0.4.el7.x86_64\", \"origin-1.5.2-0.1.el7.x86_64\"] test_pkgs_objs = TestPackage.create_test_packages(test_pkgs)", "properly_sorted_pkgs_obj = TestPackage.create_test_packages(properly_sorted_pkgs) sorted_test_pkgs_obj = sort_pkgs(test_pkgs_obj) self.assertEqual(sorted_test_pkgs_obj, properly_sorted_pkgs_obj) def test_sort_packages_with_same_minor_version(self): \"\"\" when sorting", "versioning schema - origin-3.6 \"\"\" self.assertEqual(determine_search_versions(\"origin\", \"3.7\"), (\"3.6\", \"3.7\")) def test_origin_with_standard_to_legacy_versioning_schema(self): \"\"\" when", "def test_with_no_matches(self): test_pkgs = [\"origin-1.2.0-0.4.el7.x86_64\", \"origin-1.3.0-1.1.el7.x86_64\", \"origin-1.4.2-1.1.el7.x86_64\"] test_pkgs_objs = TestPackage.create_test_packages(test_pkgs) self.assertRaises(SystemExit, get_matching_versions, 'origin',", "versions \"\"\" matching_versions = [\"1.5.0-1.4.el7\"] install_version = \"1.5.0-1.4.el7\" self.assertEqual(get_last_version(matching_versions), install_version) def test_with_multiple_matching_pre_release_versions(self): \"\"\"", "\"origin-1.5.1-1.el7\"] test_pkgs_obj = TestPackage.create_test_packages(test_pkgs) properly_sorted_pkgs_obj = TestPackage.create_test_packages(properly_sorted_pkgs) sorted_test_pkgs_obj = sort_pkgs(test_pkgs_obj) self.assertEqual(sorted_test_pkgs_obj, 
properly_sorted_pkgs_obj) if", "version self.release = release self.epoch = epoch self.vra = vra self.pkgtup = pkgtup", "test_pkgs_objs = TestPackage.create_test_packages(test_pkgs) self.assertRaises(SystemExit, get_matching_versions, 'origin', test_pkgs_objs, '1.5') class DetermineSearchVersionTestCase(unittest.TestCase): \"Test for `determine_install_upgrade_version.py`\"", "class TestPackage(object): def __init__(self, name, version, release, epoch, vra, pkgtup): self.name = name", "'1.5'), ['1.5.0-0.4.el7']) def test_with_multiple_pre_release(self): \"\"\" when only one pre-release version exist, it is", "def test_origin_with_legacy_schema(self): \"\"\" when the origin version is in the old versioning schema", "\"\"\" self.assertEqual(schema_change_check(\"origin\", \"3\", \"5\"), \"1.5\") def test_non_origin_package_with_new_schema(self): \"\"\" when origin package is in", "(\"3.5\", \"3.6\")) def test_openshift_ansible_with_short_standard_to_legacy_versioning_schema(self): \"\"\" when openshift-ansible, which doesnt have different versioning schema,", "pre-release versions exist, only release version is returned \"\"\" test_pkgs = [\"origin-1.5.0-0.4.el7.x86_64\", \"origin-1.5.0-1.1.el7.x86_64\",", "version \"\"\" self.assertEqual(schema_change_check(\"origin\", \"3\", \"6\"), \"3.6\") def test_origin_package_with_old_schema(self): \"\"\" when origin package is", "one pre-release version exist, it is returned \"\"\" test_pkgs = [\"origin-1.5.0-0.4.el7.x86_64\", \"origin-1.5.2-0.1.el7.x86_64\"] test_pkgs_objs", "origin package is in 3.6 version \"\"\" self.assertEqual(schema_change_check(\"origin\", \"3\", \"6\"), \"3.6\") def test_origin_package_with_old_schema(self):", "properly_sorted_pkgs = [\"origin-3.6.0-0.alpha.0.2.el7\"] test_pkgs_obj = TestPackage.create_test_packages(test_pkgs) properly_sorted_pkgs_obj = TestPackage.create_test_packages(properly_sorted_pkgs) sorted_test_pkgs_obj = sort_pkgs(test_pkgs_obj) self.assertEqual(sorted_test_pkgs_obj,", "matching_versions = [\"1.2.0-1.el7\", \"1.2.2-1.el7\", \"1.2.5-1.el7\"] install_version = \"1.2.5-1.el7\" self.assertEqual(get_last_version(matching_versions), install_version) def test_with_single_matching_release_version(self): \"\"\"", "self.pkgtup = pkgtup def __eq__(self, other): return self.__dict__ == other.__dict__ @classmethod def create_test_packages(self,", "versioning schema - origin-3.6 \"\"\" self.assertEqual(determine_search_versions(\"origin\", \"3.7.0\"), (\"3.6\", \"3.7\")) def test_origin_with_short_standard_versioning_schema(self): \"\"\" when", "\"\"\" test_pkgs = [\"origin-1.4.1-1.el7.x86_64\", \"origin-1.5.0-0.4.el7.x86_64\"] test_pkgs_objs = TestPackage.create_test_packages(test_pkgs) result_pkgs_objs = test_pkgs_objs[:2] self.assertEqual(remove_duplicate_pkgs(test_pkgs_objs), result_pkgs_objs)", "only one matching version exist and its pre-release, it is returned \"\"\" test_pkgs", "(\"3.6\", \"3.7\")) def test_openshift_ansible_with_standard_to_legacy_versioning_schema(self): \"\"\" when openshift-ansible, which doesnt have different versioning schema", "self.assertEqual(schema_change_check(\"origin\", \"3\", \"6\"), \"3.6\") def test_origin_package_with_old_schema(self): \"\"\" when origin package is in 1.5", "self.release = release self.epoch = epoch self.vra = vra self.pkgtup = pkgtup def", "\"\"\" test_pkgs = [\"origin-1.4.1-1.el7.x86_64\", \"origin-1.5.0-0.4.el7.x86_64\", \"origin-1.5.0-0.4.el7.x86_64\"] test_pkgs_objs = TestPackage.create_test_packages(test_pkgs) result_pkgs_objs = test_pkgs_objs[:2] 
self.assertEqual(remove_duplicate_pkgs(test_pkgs_objs),", "when origin package is in 3.5 version \"\"\" self.assertEqual(schema_change_check(\"openshift-ansible\", \"3\", \"5\"), \"3.5\") class", "release and pre-release versions exist, only release version is returned \"\"\" test_pkgs =", "for `determine_install_upgrade_version.py`\" def test_origin_with_standard_versioning_schema(self): \"\"\" when the origin version is higher then the", "versioning schema - origin-3.6 \"\"\" self.assertEqual(determine_search_versions(\"origin\", \"3.6\"), (\"1.5\", \"3.6\")) def test_origin_with_legacy_schema(self): \"\"\" when", "pkg_release + \".\" + pkg_arch pkg_tup = (pkg_name , pkg_arch, pkg_epoch, pkg_version, pkg_release)", "return self.__dict__ == other.__dict__ @classmethod def create_test_packages(self, test_pkgs): test_pkgs_objs = [] for pkg", "\"3.6\")) def test_openshift_ansible_with_legacy_versioning_schema(self): \"\"\" when openshift-ansible, which doesnt have different versioning schema is", "pkgtup): self.name = name self.version = version self.release = release self.epoch = epoch", "version \"\"\" self.assertEqual(determine_search_versions(\"openshift-ansible\", \"3.5.0\"), (\"3.4\", \"3.5\")) class SchemaChangeCheckTestCase(unittest.TestCase): \"Test for `determine_install_upgrade_version.py`\" def test_origin_package_with_new_schema(self):", "versions exist, only release version is returned \"\"\" test_pkgs = [\"origin-1.5.0-0.4.el7.x86_64\", \"origin-1.5.0-1.1.el7.x86_64\", \"origin-1.5.2-1.1.el7.x86_64\"]", "version \"\"\" self.assertEqual(determine_search_versions(\"openshift-ansible\", \"3.7.0\"), (\"3.6\", \"3.7\")) def test_openshift_ansible_with_standard_to_legacy_versioning_schema(self): \"\"\" when openshift-ansible, which doesnt", "\"\"\" test_pkgs = [\"origin-1.5.0-0.4.el7.x86_64\", \"origin-1.5.2-0.1.el7.x86_64\"] test_pkgs_objs = TestPackage.create_test_packages(test_pkgs) self.assertEqual(get_matching_versions('origin', test_pkgs_objs, '1.5'), ['1.5.0-0.4.el7', '1.5.2-0.1.el7'])", "is returned \"\"\" test_pkgs = [\"origin-1.5.0-0.4.el7.x86_64\", \"origin-1.5.2-0.1.el7.x86_64\"] test_pkgs_objs = TestPackage.create_test_packages(test_pkgs) self.assertEqual(get_matching_versions('origin', test_pkgs_objs, '1.5'),", "\"\"\" self.assertEqual(determine_search_versions(\"origin\", \"3.7.0\"), (\"3.6\", \"3.7\")) def test_origin_with_short_standard_versioning_schema(self): \"\"\" when the origin version is", "first version of the new origin versioning schema - origin-3.6 \"\"\" self.assertEqual(determine_search_versions(\"origin\", \"3.7\"),", "= sort_pkgs(test_pkgs_obj) self.assertEqual(sorted_test_pkgs_obj, properly_sorted_pkgs_obj) def test_sort_packages_with_different_minor_version(self): \"\"\" when sorting origin packages with different", "return only one \"\"\" test_pkgs = [\"origin-1.4.1-1.el7.x86_64\", \"origin-1.5.0-0.4.el7.x86_64\", \"origin-1.5.0-0.4.el7.x86_64\"] test_pkgs_objs = TestPackage.create_test_packages(test_pkgs) result_pkgs_objs", "test_pkgs_objs = TestPackage.create_test_packages(test_pkgs) result_pkgs_objs = test_pkgs_objs[:2] self.assertEqual(remove_duplicate_pkgs(test_pkgs_objs), result_pkgs_objs) class GetMatchingVersionTestCase(unittest.TestCase): \"Test for `determine_install_upgrade_version.py`\"", "= TestPackage.create_test_packages(test_pkgs) self.assertEqual(get_matching_versions('origin', test_pkgs_objs, '1.5'), [\"1.5.0-1.1.el7\"]) def test_with_muptiple_release(self): \"\"\" when both release and", "short format and higher then the first 
version of the new origin versioning", "TestPackage.create_test_packages(test_pkgs) self.assertEqual(get_matching_versions('origin', test_pkgs_objs, '1.5'), ['1.5.0-0.4.el7', '1.5.2-0.1.el7']) def test_with_single_release(self): \"\"\" when both release and", "package is in 1.5 version \"\"\" self.assertEqual(schema_change_check(\"origin\", \"3\", \"5\"), \"1.5\") def test_non_origin_package_with_new_schema(self): \"\"\"", "'1.5') class DetermineSearchVersionTestCase(unittest.TestCase): \"Test for `determine_install_upgrade_version.py`\" def test_origin_with_standard_versioning_schema(self): \"\"\" when the origin version", "\"\"\" when the origin version is in short format and first from the", "\"origin-3.6.0-0.alpha.0.2.el7\"] properly_sorted_pkgs = [\"origin-3.6.0-0.alpha.0.2.el7\"] test_pkgs_obj = TestPackage.create_test_packages(test_pkgs) properly_sorted_pkgs_obj = TestPackage.create_test_packages(properly_sorted_pkgs) sorted_test_pkgs_obj = sort_pkgs(test_pkgs_obj)", "def test_with_multiple_matching_pre_release_versions(self): \"\"\" when multiple matching pre-release version are present in pre-released versions", "self.assertEqual(schema_change_check(\"openshift-ansible\", \"3\", \"6\"), \"3.6\") def test_non_origin_package_with_old_schema(self): \"\"\" when origin package is in 3.5", "\"origin-1.5.0-0.4.el7.x86_64\"] test_pkgs_objs = TestPackage.create_test_packages(test_pkgs) self.assertEqual(get_matching_versions('origin', test_pkgs_objs, '1.5'), ['1.5.0-0.4.el7']) def test_with_single_pre_release(self): \"\"\" when only", "test_origin_with_short_legacy_schema(self): \"\"\" when the origin version is in short and old versioning schema", "pkg_vra, pkg_tup)) return test_pkgs_objs class RemoveDuplicatePackages(unittest.TestCase): \"Test for `determine_install_upgrade_version.py`\" def test_removing_single_duplicate_package(self): \"\"\" when", "= [\"1.5.0-0.4.el7\"] install_version = \"1.5.0-0.4.el7\" self.assertEqual(get_last_version(matching_versions), install_version) class SortPackagesTestCase(unittest.TestCase): \"Test for `determine_install_upgrade_version.py`\" def", "== other.__dict__ @classmethod def create_test_packages(self, test_pkgs): test_pkgs_objs = [] for pkg in test_pkgs:", "def create_test_packages(self, test_pkgs): test_pkgs_objs = [] for pkg in test_pkgs: pkg_name, pkg_version, pkg_release,", "test_non_origin_package_with_old_schema(self): \"\"\" when origin package is in 3.5 version \"\"\" self.assertEqual(schema_change_check(\"openshift-ansible\", \"3\", \"5\"),", "matching_versions = [\"1.5.0-1.4.el7\"] install_version = \"1.5.0-1.4.el7\" self.assertEqual(get_last_version(matching_versions), install_version) def test_with_multiple_matching_pre_release_versions(self): \"\"\" when multiple", "test_pkgs_objs class RemoveDuplicatePackages(unittest.TestCase): \"Test for `determine_install_upgrade_version.py`\" def test_removing_single_duplicate_package(self): \"\"\" when is multiple duplicate", "when both release and pre-release versions exist, only release versions are returned \"\"\"", "packages within the same minor version \"\"\" test_pkgs = [\"origin-1.5.1-1.el7\", \"origin-1.5.0-1.el7\"] properly_sorted_pkgs =", "\"\"\" when is multiple duplicate packages, return only one \"\"\" test_pkgs = [\"origin-1.4.1-1.el7.x86_64\",", "version exist, it is returned \"\"\" test_pkgs = [\"origin-1.5.0-0.4.el7.x86_64\"] test_pkgs_objs = TestPackage.create_test_packages(test_pkgs) self.assertEqual(get_matching_versions('origin',", "\"\"\" matching_versions = 
[\"1.2.0-1.el7\", \"1.2.2-1.el7\", \"1.2.5-1.el7\"] install_version = \"1.2.5-1.el7\" self.assertEqual(get_last_version(matching_versions), install_version) def test_with_single_matching_release_version(self):", "\"Test for `determine_install_upgrade_version.py`\" def test_get_matching_versions(self): \"\"\" when only one matching version exist and", "<reponame>brenton/aos-cd-jobs import unittest from determine_install_upgrade_version import * class TestPackage(object): def __init__(self, name, version,", "version, release, epoch, vra, pkgtup): self.name = name self.version = version self.release =", "in 3.5 version \"\"\" self.assertEqual(schema_change_check(\"openshift-ansible\", \"3\", \"5\"), \"3.5\") class GetLastVersionTestCase(unittest.TestCase): \"Test for `determine_install_upgrade_version.py`\"", "new origin versioning schema - origin-3.6 \"\"\" self.assertEqual(determine_search_versions(\"origin\", \"3.7\"), (\"3.6\", \"3.7\")) def test_origin_with_standard_to_legacy_versioning_schema(self):", "when only single matching pre-release version is present in pre-released versions \"\"\" matching_versions", "TestPackage.create_test_packages(properly_sorted_pkgs) sorted_test_pkgs_obj = sort_pkgs(test_pkgs_obj) self.assertEqual(sorted_test_pkgs_obj, properly_sorted_pkgs_obj) def test_sort_packages_with_different_minor_version(self): \"\"\" when sorting origin packages", "returned \"\"\" test_pkgs = [\"origin-1.5.0-0.4.el7.x86_64\", \"origin-1.5.0-1.1.el7.x86_64\"] test_pkgs_objs = TestPackage.create_test_packages(test_pkgs) self.assertEqual(get_matching_versions('origin', test_pkgs_objs, '1.5'), [\"1.5.0-1.1.el7\"])", "with exceptional origin-3.6.0-0.0.alpha.0.1 package \"\"\" test_pkgs = [\"origin-3.6.0-0.0.alpha.0.1.el7\", \"origin-3.6.0-0.alpha.0.2.el7\"] properly_sorted_pkgs = [\"origin-3.6.0-0.alpha.0.2.el7\"] test_pkgs_obj", "['1.5.0-0.4.el7']) def test_with_single_pre_release(self): \"\"\" when only one pre-release version exist, it is returned", "= [\"origin-1.5.0-0.4.el7.x86_64\", \"origin-1.5.2-0.1.el7.x86_64\"] test_pkgs_objs = TestPackage.create_test_packages(test_pkgs) self.assertEqual(get_matching_versions('origin', test_pkgs_objs, '1.5'), ['1.5.0-0.4.el7', '1.5.2-0.1.el7']) def test_with_single_release(self):", "release and pre-release versions exist, only release versions are returned \"\"\" test_pkgs =", "test_openshift_ansible_with_standard_versioning_schema(self): \"\"\" when openshift-ansible, which doesnt have different versioning schema, is in 3.7", "`determine_install_upgrade_version.py`\" def test_get_matching_versions(self): \"\"\" when only one matching version exist and its pre-release,", "(\"3.4\", \"3.5\")) class SchemaChangeCheckTestCase(unittest.TestCase): \"Test for `determine_install_upgrade_version.py`\" def test_origin_package_with_new_schema(self): \"\"\" when origin package", "+ \".\" + pkg_arch pkg_tup = (pkg_name , pkg_arch, pkg_epoch, pkg_version, pkg_release) test_pkgs_objs.append(TestPackage(pkg_name,", "origin version is in short and old versioning schema \"\"\" self.assertEqual(determine_search_versions(\"origin\", \"1.5\"), (\"1.4\",", "when openshift-ansible, which doesnt have different versioning schema is in 3.6 version \"\"\"", "same minor version \"\"\" test_pkgs = [\"origin-1.5.1-1.el7\", \"origin-1.5.0-1.el7\"] properly_sorted_pkgs = [\"origin-1.5.0-1.el7\", \"origin-1.5.1-1.el7\"] test_pkgs_obj", "is returned \"\"\" test_pkgs = [\"origin-1.5.0-0.4.el7.x86_64\", \"origin-1.5.0-1.1.el7.x86_64\", \"origin-1.5.2-1.1.el7.x86_64\"] 
test_pkgs_objs = TestPackage.create_test_packages(test_pkgs) self.assertEqual(get_matching_versions('origin', test_pkgs_objs,", "is in 3.4 version \"\"\" self.assertEqual(determine_search_versions(\"openshift-ansible\", \"3.5.0\"), (\"3.4\", \"3.5\")) class SchemaChangeCheckTestCase(unittest.TestCase): \"Test for", "pre-release version is present in pre-released versions \"\"\" matching_versions = [\"1.5.0-0.4.el7\"] install_version =", "self.assertEqual(remove_duplicate_pkgs(test_pkgs_objs), result_pkgs_objs) class GetMatchingVersionTestCase(unittest.TestCase): \"Test for `determine_install_upgrade_version.py`\" def test_get_matching_versions(self): \"\"\" when only one", "TestPackage.create_test_packages(test_pkgs) self.assertEqual(get_matching_versions('origin', test_pkgs_objs, '1.5'), ['1.5.0-0.4.el7']) def test_with_multiple_pre_release(self): \"\"\" when only one pre-release version", "short format and first from the new origin versioning schema - origin-3.6 \"\"\"", "origin packages within the same minor version \"\"\" test_pkgs = [\"origin-1.5.1-1.el7\", \"origin-1.5.0-1.el7\"] properly_sorted_pkgs", "openshift-ansible, which doesnt have different versioning schema, is in 3.7 version \"\"\" self.assertEqual(determine_search_versions(\"openshift-ansible\",", "\"5\"), \"1.5\") def test_non_origin_package_with_new_schema(self): \"\"\" when origin package is in 3.6 version \"\"\"", "test_pkgs): test_pkgs_objs = [] for pkg in test_pkgs: pkg_name, pkg_version, pkg_release, pkg_epoch, pkg_arch", "returned \"\"\" test_pkgs = [\"origin-1.4.1-1.el7.x86_64\", \"origin-1.5.0-0.4.el7.x86_64\"] test_pkgs_objs = TestPackage.create_test_packages(test_pkgs) self.assertEqual(get_matching_versions('origin', test_pkgs_objs, '1.5'), ['1.5.0-0.4.el7'])", "result_pkgs_objs = test_pkgs_objs[:2] self.assertEqual(remove_duplicate_pkgs(test_pkgs_objs), result_pkgs_objs) def test_removing_no_duplicate_package(self): \"\"\" when there is no duplicate", "[\"1.5.0-1.1.el7\"]) def test_with_muptiple_release(self): \"\"\" when both release and pre-release versions exist, only release", "\"\"\" when there is no duplicate package, return the single one \"\"\" test_pkgs", "have different versioning schema is in 3.6 version \"\"\" self.assertEqual(determine_search_versions(\"openshift-ansible\", \"3.6.0\"), (\"3.5\", \"3.6\"))", "test_pkgs = [\"origin-1.5.0-0.4.el7.x86_64\"] test_pkgs_objs = TestPackage.create_test_packages(test_pkgs) self.assertEqual(get_matching_versions('origin', test_pkgs_objs, '1.5'), ['1.5.0-0.4.el7']) def test_with_multiple_pre_release(self): \"\"\"", "test_pkgs_objs, '1.5') class DetermineSearchVersionTestCase(unittest.TestCase): \"Test for `determine_install_upgrade_version.py`\" def test_origin_with_standard_versioning_schema(self): \"\"\" when the origin", "versioning schema, is in short format and in 3.6 version \"\"\" self.assertEqual(determine_search_versions(\"openshift-ansible\", \"3.6\"),", "pkg_arch, pkg_epoch, pkg_version, pkg_release) test_pkgs_objs.append(TestPackage(pkg_name, pkg_version, pkg_release, pkg_epoch, pkg_vra, pkg_tup)) return test_pkgs_objs class", "vra self.pkgtup = pkgtup def __eq__(self, other): return self.__dict__ == other.__dict__ @classmethod def", "* class TestPackage(object): def __init__(self, name, version, release, epoch, vra, pkgtup): self.name =", "def test_removing_no_duplicate_package(self): \"\"\" when there is no duplicate package, return the single one", "properly_sorted_pkgs_obj) def test_sort_packages_with_same_minor_version(self): \"\"\" when 
sorting origin packages within the same minor version", "['1.5.0-0.4.el7']) def test_with_multiple_pre_release(self): \"\"\" when only one pre-release version exist, it is returned", "is in 3.7 version \"\"\" self.assertEqual(determine_search_versions(\"openshift-ansible\", \"3.7.0\"), (\"3.6\", \"3.7\")) def test_openshift_ansible_with_standard_to_legacy_versioning_schema(self): \"\"\" when", "schema is in 3.6 version \"\"\" self.assertEqual(determine_search_versions(\"openshift-ansible\", \"3.6.0\"), (\"3.5\", \"3.6\")) def test_openshift_ansible_with_short_standard_to_legacy_versioning_schema(self): \"\"\"", "single matching version is present in released versions \"\"\" matching_versions = [\"1.5.0-1.4.el7\"] install_version", "= TestPackage.create_test_packages(properly_sorted_pkgs) sorted_test_pkgs_obj = sort_pkgs(test_pkgs_obj) self.assertEqual(sorted_test_pkgs_obj, properly_sorted_pkgs_obj) def test_sort_packages_with_different_minor_version(self): \"\"\" when sorting origin", "versioning schema is in 3.6 version \"\"\" self.assertEqual(determine_search_versions(\"openshift-ansible\", \"3.6.0\"), (\"3.5\", \"3.6\")) def test_openshift_ansible_with_short_standard_to_legacy_versioning_schema(self):", "only one \"\"\" test_pkgs = [\"origin-1.4.1-1.el7.x86_64\", \"origin-1.5.0-0.4.el7.x86_64\", \"origin-1.5.0-0.4.el7.x86_64\"] test_pkgs_objs = TestPackage.create_test_packages(test_pkgs) result_pkgs_objs =", "it is returned \"\"\" test_pkgs = [\"origin-1.4.1-1.el7.x86_64\", \"origin-1.5.0-0.4.el7.x86_64\"] test_pkgs_objs = TestPackage.create_test_packages(test_pkgs) self.assertEqual(get_matching_versions('origin', test_pkgs_objs,", "when multiple matching pre-release version are present in pre-released versions \"\"\" matching_versions =", "def test_sort_packages_with_different_minor_version(self): \"\"\" when sorting origin packages with different minor version \"\"\" test_pkgs", "the origin version is in short format and higher then the first version", "is in 3.6 version \"\"\" self.assertEqual(schema_change_check(\"openshift-ansible\", \"3\", \"6\"), \"3.6\") def test_non_origin_package_with_old_schema(self): \"\"\" when", "test_origin_with_short_standard_to_legacy_versioning_schema(self): \"\"\" when the origin version is in short format and first from", "\"3.7.0\"), (\"3.6\", \"3.7\")) def test_openshift_ansible_with_standard_to_legacy_versioning_schema(self): \"\"\" when openshift-ansible, which doesnt have different versioning", "[\"origin-3.6.0-0.0.alpha.0.1.el7\", \"origin-3.6.0-0.alpha.0.2.el7\"] properly_sorted_pkgs = [\"origin-3.6.0-0.alpha.0.2.el7\"] test_pkgs_obj = TestPackage.create_test_packages(test_pkgs) properly_sorted_pkgs_obj = TestPackage.create_test_packages(properly_sorted_pkgs) sorted_test_pkgs_obj =", "test_with_single_pre_release(self): \"\"\" when only one pre-release version exist, it is returned \"\"\" test_pkgs", "\"\"\" self.assertEqual(determine_search_versions(\"origin\", \"3.6.0\"), (\"1.5\", \"3.6\")) def test_origin_with_short_standard_to_legacy_versioning_schema(self): \"\"\" when the origin version is", "install_version) def test_with_multiple_matching_pre_release_versions(self): \"\"\" when multiple matching pre-release version are present in pre-released", "\"origin-1.5.0-0.4.el7.x86_64\"] test_pkgs_objs = TestPackage.create_test_packages(test_pkgs) result_pkgs_objs = test_pkgs_objs[:2] self.assertEqual(remove_duplicate_pkgs(test_pkgs_objs), result_pkgs_objs) class GetMatchingVersionTestCase(unittest.TestCase): \"Test for", 
"[\"origin-1.2.0-0.4.el7.x86_64\", \"origin-1.3.0-1.1.el7.x86_64\", \"origin-1.4.2-1.1.el7.x86_64\"] test_pkgs_objs = TestPackage.create_test_packages(test_pkgs) self.assertRaises(SystemExit, get_matching_versions, 'origin', test_pkgs_objs, '1.5') class DetermineSearchVersionTestCase(unittest.TestCase):", "package, return the single one \"\"\" test_pkgs = [\"origin-1.4.1-1.el7.x86_64\", \"origin-1.5.0-0.4.el7.x86_64\"] test_pkgs_objs = TestPackage.create_test_packages(test_pkgs)", "class RemoveDuplicatePackages(unittest.TestCase): \"Test for `determine_install_upgrade_version.py`\" def test_removing_single_duplicate_package(self): \"\"\" when is multiple duplicate packages,", "\"\"\" when both release and pre-release versions exist, only release version is returned", "in 3.6 version \"\"\" self.assertEqual(schema_change_check(\"openshift-ansible\", \"3\", \"6\"), \"3.6\") def test_non_origin_package_with_old_schema(self): \"\"\" when origin", "= [\"origin-1.4.1-1.el7.x86_64\", \"origin-1.5.0-0.4.el7.x86_64\", \"origin-1.5.0-0.4.el7.x86_64\"] test_pkgs_objs = TestPackage.create_test_packages(test_pkgs) result_pkgs_objs = test_pkgs_objs[:2] self.assertEqual(remove_duplicate_pkgs(test_pkgs_objs), result_pkgs_objs) def", "[\"origin-1.5.1-1.el7\", \"origin-1.5.0-1.el7\"] properly_sorted_pkgs = [\"origin-1.5.0-1.el7\", \"origin-1.5.1-1.el7\"] test_pkgs_obj = TestPackage.create_test_packages(test_pkgs) properly_sorted_pkgs_obj = TestPackage.create_test_packages(properly_sorted_pkgs) sorted_test_pkgs_obj", "- origin-3.6 \"\"\" self.assertEqual(determine_search_versions(\"origin\", \"3.7\"), (\"3.6\", \"3.7\")) def test_origin_with_standard_to_legacy_versioning_schema(self): \"\"\" when the origin", "(\"1.5\", \"3.6\")) def test_origin_with_short_standard_to_legacy_versioning_schema(self): \"\"\" when the origin version is in short format", "\"\"\" when the origin version is in short and old versioning schema \"\"\"", "\"\"\" matching_versions = [\"1.2.0-0.el7\", \"1.2.2-0.el7\", \"1.2.5-0.el7\"] install_version = \"1.2.5-0.el7\" self.assertEqual(get_last_version(matching_versions), install_version) def test_with_single_matching_pre_release_version(self):", "\"1.2.5-0.el7\"] install_version = \"1.2.5-0.el7\" self.assertEqual(get_last_version(matching_versions), install_version) def test_with_single_matching_pre_release_version(self): \"\"\" when only single matching", "test_pkgs_objs, '1.5'), ['1.5.0-0.4.el7']) def test_with_single_pre_release(self): \"\"\" when only one pre-release version exist, it", "'1.5.2-0.1.el7']) def test_with_single_release(self): \"\"\" when both release and pre-release versions exist, only release", "version is higher then the first version of the new origin versioning schema", "test_pkgs = [\"origin-1.5.0-0.4.el7.x86_64\", \"origin-1.5.2-0.1.el7.x86_64\"] test_pkgs_objs = TestPackage.create_test_packages(test_pkgs) self.assertEqual(get_matching_versions('origin', test_pkgs_objs, '1.5'), ['1.5.0-0.4.el7', '1.5.2-0.1.el7']) def", "doesnt have different versioning schema, is in 3.7 version \"\"\" self.assertEqual(determine_search_versions(\"openshift-ansible\", \"3.7.0\"), (\"3.6\",", "old versioning schema \"\"\" self.assertEqual(determine_search_versions(\"origin\", \"1.5\"), (\"1.4\", \"1.5\")) def test_openshift_ansible_with_standard_versioning_schema(self): \"\"\" when openshift-ansible,", "when only one pre-release version exist, it is returned \"\"\" test_pkgs = [\"origin-1.5.0-0.4.el7.x86_64\"]", "doesnt have different versioning schema is in 3.4 version \"\"\" 
self.assertEqual(determine_search_versions(\"openshift-ansible\", \"3.5.0\"), (\"3.4\",", "self.assertEqual(remove_duplicate_pkgs(test_pkgs_objs), result_pkgs_objs) def test_removing_no_duplicate_package(self): \"\"\" when there is no duplicate package, return the", "test_removing_no_duplicate_package(self): \"\"\" when there is no duplicate package, return the single one \"\"\"", "\"1.5.0\"), (\"1.4\", \"1.5\")) def test_origin_with_short_legacy_schema(self): \"\"\" when the origin version is in short", "= epoch self.vra = vra self.pkgtup = pkgtup def __eq__(self, other): return self.__dict__", "test_pkgs: pkg_name, pkg_version, pkg_release, pkg_epoch, pkg_arch = rpmutils.splitFilename(pkg) pkg_vra = pkg_version + \"-\"", "[\"1.2.0-0.el7\", \"1.2.2-0.el7\", \"1.2.5-0.el7\"] install_version = \"1.2.5-0.el7\" self.assertEqual(get_last_version(matching_versions), install_version) def test_with_single_matching_pre_release_version(self): \"\"\" when only", "pkg_tup)) return test_pkgs_objs class RemoveDuplicatePackages(unittest.TestCase): \"Test for `determine_install_upgrade_version.py`\" def test_removing_single_duplicate_package(self): \"\"\" when is", "version is in short format and first from the new origin versioning schema", "[\"1.5.0-1.1.el7\", \"1.5.2-1.1.el7\"]) def test_with_no_matches(self): test_pkgs = [\"origin-1.2.0-0.4.el7.x86_64\", \"origin-1.3.0-1.1.el7.x86_64\", \"origin-1.4.2-1.1.el7.x86_64\"] test_pkgs_objs = TestPackage.create_test_packages(test_pkgs) self.assertRaises(SystemExit,", "class GetMatchingVersionTestCase(unittest.TestCase): \"Test for `determine_install_upgrade_version.py`\" def test_get_matching_versions(self): \"\"\" when only one matching version", "def test_openshift_ansible_with_short_standard_to_legacy_versioning_schema(self): \"\"\" when openshift-ansible, which doesnt have different versioning schema, is in", "pkg_epoch, pkg_arch = rpmutils.splitFilename(pkg) pkg_vra = pkg_version + \"-\" + pkg_release + \".\"", "= TestPackage.create_test_packages(test_pkgs) self.assertRaises(SystemExit, get_matching_versions, 'origin', test_pkgs_objs, '1.5') class DetermineSearchVersionTestCase(unittest.TestCase): \"Test for `determine_install_upgrade_version.py`\" def", "pkg_name, pkg_version, pkg_release, pkg_epoch, pkg_arch = rpmutils.splitFilename(pkg) pkg_vra = pkg_version + \"-\" +", "test_with_single_matching_pre_release_version(self): \"\"\" when only single matching pre-release version is present in pre-released versions", "def test_with_single_pre_release(self): \"\"\" when only one pre-release version exist, it is returned \"\"\"", "['1.5.0-0.4.el7', '1.5.2-0.1.el7']) def test_with_single_release(self): \"\"\" when both release and pre-release versions exist, only", "is present in released versions \"\"\" matching_versions = [\"1.5.0-1.4.el7\"] install_version = \"1.5.0-1.4.el7\" self.assertEqual(get_last_version(matching_versions),", "test_with_multiple_matching_release_versions(self): \"\"\" when multiple matching version are present in released versions \"\"\" matching_versions", "release version is returned \"\"\" test_pkgs = [\"origin-1.5.0-0.4.el7.x86_64\", \"origin-1.5.0-1.1.el7.x86_64\", \"origin-1.5.2-1.1.el7.x86_64\"] test_pkgs_objs = TestPackage.create_test_packages(test_pkgs)", "`determine_install_upgrade_version.py`\" def test_with_multiple_matching_release_versions(self): \"\"\" when multiple matching version are present in released versions", "= \"1.5.0-0.4.el7\" self.assertEqual(get_last_version(matching_versions), install_version) class 
SortPackagesTestCase(unittest.TestCase): \"Test for `determine_install_upgrade_version.py`\" def test_sort_packages_with_exceptional_origin_pkg(self): \"\"\" when", "both release and pre-release versions exist, only release version is returned \"\"\" test_pkgs", "[] for pkg in test_pkgs: pkg_name, pkg_version, pkg_release, pkg_epoch, pkg_arch = rpmutils.splitFilename(pkg) pkg_vra", "test_sort_packages_with_exceptional_origin_pkg(self): \"\"\" when sorting origin packages with exceptional origin-3.6.0-0.0.alpha.0.1 package \"\"\" test_pkgs =", "= test_pkgs_objs[:2] self.assertEqual(remove_duplicate_pkgs(test_pkgs_objs), result_pkgs_objs) class GetMatchingVersionTestCase(unittest.TestCase): \"Test for `determine_install_upgrade_version.py`\" def test_get_matching_versions(self): \"\"\" when", "sort_pkgs(test_pkgs_obj) self.assertEqual(sorted_test_pkgs_obj, properly_sorted_pkgs_obj) def test_sort_packages_with_different_minor_version(self): \"\"\" when sorting origin packages with different minor", "\"6\"), \"3.6\") def test_origin_package_with_old_schema(self): \"\"\" when origin package is in 1.5 version \"\"\"", "when origin package is in 1.5 version \"\"\" self.assertEqual(schema_change_check(\"origin\", \"3\", \"5\"), \"1.5\") def", "= [] for pkg in test_pkgs: pkg_name, pkg_version, pkg_release, pkg_epoch, pkg_arch = rpmutils.splitFilename(pkg)", "self.assertEqual(determine_search_versions(\"origin\", \"3.6\"), (\"1.5\", \"3.6\")) def test_origin_with_legacy_schema(self): \"\"\" when the origin version is in", "self.assertEqual(determine_search_versions(\"openshift-ansible\", \"3.7.0\"), (\"3.6\", \"3.7\")) def test_openshift_ansible_with_standard_to_legacy_versioning_schema(self): \"\"\" when openshift-ansible, which doesnt have different", "pkg_version, pkg_release) test_pkgs_objs.append(TestPackage(pkg_name, pkg_version, pkg_release, pkg_epoch, pkg_vra, pkg_tup)) return test_pkgs_objs class RemoveDuplicatePackages(unittest.TestCase): \"Test", "\"\"\" matching_versions = [\"1.5.0-0.4.el7\"] install_version = \"1.5.0-0.4.el7\" self.assertEqual(get_last_version(matching_versions), install_version) class SortPackagesTestCase(unittest.TestCase): \"Test for", "pkg in test_pkgs: pkg_name, pkg_version, pkg_release, pkg_epoch, pkg_arch = rpmutils.splitFilename(pkg) pkg_vra = pkg_version", "is in the old versioning schema \"\"\" self.assertEqual(determine_search_versions(\"origin\", \"1.5.0\"), (\"1.4\", \"1.5\")) def test_origin_with_short_legacy_schema(self):", "version exist and its pre-release, it is returned \"\"\" test_pkgs = [\"origin-1.4.1-1.el7.x86_64\", \"origin-1.5.0-0.4.el7.x86_64\"]", "version \"\"\" self.assertEqual(determine_search_versions(\"openshift-ansible\", \"3.6\"), (\"3.5\", \"3.6\")) def test_openshift_ansible_with_legacy_versioning_schema(self): \"\"\" when openshift-ansible, which doesnt", "= TestPackage.create_test_packages(test_pkgs) self.assertEqual(get_matching_versions('origin', test_pkgs_objs, '1.5'), [\"1.5.0-1.1.el7\", \"1.5.2-1.1.el7\"]) def test_with_no_matches(self): test_pkgs = [\"origin-1.2.0-0.4.el7.x86_64\", \"origin-1.3.0-1.1.el7.x86_64\",", "def test_openshift_ansible_with_standard_to_legacy_versioning_schema(self): \"\"\" when openshift-ansible, which doesnt have different versioning schema is in", "= rpmutils.splitFilename(pkg) pkg_vra = pkg_version + \"-\" + pkg_release + \".\" + pkg_arch", "with different minor version \"\"\" test_pkgs = [\"origin-1.5.1-1.el7\", \"origin-1.4.0-1.el7\"] properly_sorted_pkgs = [\"origin-1.4.0-1.el7\", 
\"origin-1.5.1-1.el7\"]", "def test_origin_with_standard_to_legacy_versioning_schema(self): \"\"\" when the origin version is the first from the new", "test_pkgs_objs = TestPackage.create_test_packages(test_pkgs) self.assertEqual(get_matching_versions('origin', test_pkgs_objs, '1.5'), ['1.5.0-0.4.el7', '1.5.2-0.1.el7']) def test_with_single_release(self): \"\"\" when both", "def test_with_muptiple_release(self): \"\"\" when both release and pre-release versions exist, only release version", "format and higher then the first version of the new origin versioning schema", "\"\"\" when sorting origin packages within the same minor version \"\"\" test_pkgs =", "sorting origin packages within the same minor version \"\"\" test_pkgs = [\"origin-1.5.1-1.el7\", \"origin-1.5.0-1.el7\"]", "pre-release version exist, it is returned \"\"\" test_pkgs = [\"origin-1.5.0-0.4.el7.x86_64\", \"origin-1.5.2-0.1.el7.x86_64\"] test_pkgs_objs =", "\"origin-1.5.0-0.4.el7.x86_64\"] test_pkgs_objs = TestPackage.create_test_packages(test_pkgs) result_pkgs_objs = test_pkgs_objs[:2] self.assertEqual(remove_duplicate_pkgs(test_pkgs_objs), result_pkgs_objs) def test_removing_no_duplicate_package(self): \"\"\" when", "= \"1.5.0-1.4.el7\" self.assertEqual(get_last_version(matching_versions), install_version) def test_with_multiple_matching_pre_release_versions(self): \"\"\" when multiple matching pre-release version are", "for pkg in test_pkgs: pkg_name, pkg_version, pkg_release, pkg_epoch, pkg_arch = rpmutils.splitFilename(pkg) pkg_vra =", "self.assertEqual(get_last_version(matching_versions), install_version) def test_with_multiple_matching_pre_release_versions(self): \"\"\" when multiple matching pre-release version are present in", "self.assertEqual(get_matching_versions('origin', test_pkgs_objs, '1.5'), ['1.5.0-0.4.el7', '1.5.2-0.1.el7']) def test_with_single_release(self): \"\"\" when both release and pre-release", "matching version is present in released versions \"\"\" matching_versions = [\"1.5.0-1.4.el7\"] install_version =", "\"\"\" self.assertEqual(determine_search_versions(\"openshift-ansible\", \"3.6\"), (\"3.5\", \"3.6\")) def test_openshift_ansible_with_legacy_versioning_schema(self): \"\"\" when openshift-ansible, which doesnt have", "origin version is in the old versioning schema \"\"\" self.assertEqual(determine_search_versions(\"origin\", \"1.5.0\"), (\"1.4\", \"1.5\"))", "\"1.2.2-1.el7\", \"1.2.5-1.el7\"] install_version = \"1.2.5-1.el7\" self.assertEqual(get_last_version(matching_versions), install_version) def test_with_single_matching_release_version(self): \"\"\" when only a", "when sorting origin packages with exceptional origin-3.6.0-0.0.alpha.0.1 package \"\"\" test_pkgs = [\"origin-3.6.0-0.0.alpha.0.1.el7\", \"origin-3.6.0-0.alpha.0.2.el7\"]", "= [\"origin-3.6.0-0.0.alpha.0.1.el7\", \"origin-3.6.0-0.alpha.0.2.el7\"] properly_sorted_pkgs = [\"origin-3.6.0-0.alpha.0.2.el7\"] test_pkgs_obj = TestPackage.create_test_packages(test_pkgs) properly_sorted_pkgs_obj = TestPackage.create_test_packages(properly_sorted_pkgs) sorted_test_pkgs_obj", "test_sort_packages_with_same_minor_version(self): \"\"\" when sorting origin packages within the same minor version \"\"\" test_pkgs", "and pre-release versions exist, only release versions are returned \"\"\" test_pkgs = [\"origin-1.5.0-0.4.el7.x86_64\",", "in 3.4 version \"\"\" self.assertEqual(determine_search_versions(\"openshift-ansible\", \"3.5.0\"), (\"3.4\", \"3.5\")) class SchemaChangeCheckTestCase(unittest.TestCase): \"Test for 
`determine_install_upgrade_version.py`\"", "packages with different minor version \"\"\" test_pkgs = [\"origin-1.5.1-1.el7\", \"origin-1.4.0-1.el7\"] properly_sorted_pkgs = [\"origin-1.4.0-1.el7\",", "pkg_epoch, pkg_version, pkg_release) test_pkgs_objs.append(TestPackage(pkg_name, pkg_version, pkg_release, pkg_epoch, pkg_vra, pkg_tup)) return test_pkgs_objs class RemoveDuplicatePackages(unittest.TestCase):", "\"1.5.0-0.4.el7\" self.assertEqual(get_last_version(matching_versions), install_version) class SortPackagesTestCase(unittest.TestCase): \"Test for `determine_install_upgrade_version.py`\" def test_sort_packages_with_exceptional_origin_pkg(self): \"\"\" when sorting", "test_pkgs = [\"origin-1.5.0-0.4.el7.x86_64\", \"origin-1.5.0-1.1.el7.x86_64\"] test_pkgs_objs = TestPackage.create_test_packages(test_pkgs) self.assertEqual(get_matching_versions('origin', test_pkgs_objs, '1.5'), [\"1.5.0-1.1.el7\"]) def test_with_muptiple_release(self):", "def test_removing_single_duplicate_package(self): \"\"\" when is multiple duplicate packages, return only one \"\"\" test_pkgs", "def test_with_single_matching_pre_release_version(self): \"\"\" when only single matching pre-release version is present in pre-released", "self.epoch = epoch self.vra = vra self.pkgtup = pkgtup def __eq__(self, other): return", "name, version, release, epoch, vra, pkgtup): self.name = name self.version = version self.release", "class SchemaChangeCheckTestCase(unittest.TestCase): \"Test for `determine_install_upgrade_version.py`\" def test_origin_package_with_new_schema(self): \"\"\" when origin package is in", "[\"1.5.0-1.4.el7\"] install_version = \"1.5.0-1.4.el7\" self.assertEqual(get_last_version(matching_versions), install_version) def test_with_multiple_matching_pre_release_versions(self): \"\"\" when multiple matching pre-release", "= name self.version = version self.release = release self.epoch = epoch self.vra =", "`determine_install_upgrade_version.py`\" def test_origin_package_with_new_schema(self): \"\"\" when origin package is in 3.6 version \"\"\" self.assertEqual(schema_change_check(\"origin\",", "pkg_arch pkg_tup = (pkg_name , pkg_arch, pkg_epoch, pkg_version, pkg_release) test_pkgs_objs.append(TestPackage(pkg_name, pkg_version, pkg_release, pkg_epoch,", "\"origin-1.5.0-1.1.el7.x86_64\"] test_pkgs_objs = TestPackage.create_test_packages(test_pkgs) self.assertEqual(get_matching_versions('origin', test_pkgs_objs, '1.5'), [\"1.5.0-1.1.el7\"]) def test_with_muptiple_release(self): \"\"\" when both", "'1.5'), [\"1.5.0-1.1.el7\"]) def test_with_muptiple_release(self): \"\"\" when both release and pre-release versions exist, only", "\"\"\" self.assertEqual(schema_change_check(\"openshift-ansible\", \"3\", \"6\"), \"3.6\") def test_non_origin_package_with_old_schema(self): \"\"\" when origin package is in", "\"\"\" self.assertEqual(determine_search_versions(\"origin\", \"3.7\"), (\"3.6\", \"3.7\")) def test_origin_with_standard_to_legacy_versioning_schema(self): \"\"\" when the origin version is", "test_pkgs_obj = TestPackage.create_test_packages(test_pkgs) properly_sorted_pkgs_obj = TestPackage.create_test_packages(properly_sorted_pkgs) sorted_test_pkgs_obj = sort_pkgs(test_pkgs_obj) self.assertEqual(sorted_test_pkgs_obj, properly_sorted_pkgs_obj) if __name__", "when the origin version is in short and old versioning schema \"\"\" self.assertEqual(determine_search_versions(\"origin\",", "\".\" + pkg_arch pkg_tup = (pkg_name , pkg_arch, pkg_epoch, pkg_version, pkg_release) test_pkgs_objs.append(TestPackage(pkg_name, 
pkg_version,", "in 3.6 version \"\"\" self.assertEqual(determine_search_versions(\"openshift-ansible\", \"3.6\"), (\"3.5\", \"3.6\")) def test_openshift_ansible_with_legacy_versioning_schema(self): \"\"\" when openshift-ansible,", "self.assertEqual(determine_search_versions(\"origin\", \"1.5\"), (\"1.4\", \"1.5\")) def test_openshift_ansible_with_standard_versioning_schema(self): \"\"\" when openshift-ansible, which doesnt have different", "which doesnt have different versioning schema is in 3.6 version \"\"\" self.assertEqual(determine_search_versions(\"openshift-ansible\", \"3.6.0\"),", "self.assertEqual(determine_search_versions(\"origin\", \"1.5.0\"), (\"1.4\", \"1.5\")) def test_origin_with_short_legacy_schema(self): \"\"\" when the origin version is in", "version \"\"\" self.assertEqual(schema_change_check(\"origin\", \"3\", \"5\"), \"1.5\") def test_non_origin_package_with_new_schema(self): \"\"\" when origin package is", "= TestPackage.create_test_packages(test_pkgs) self.assertEqual(get_matching_versions('origin', test_pkgs_objs, '1.5'), ['1.5.0-0.4.el7', '1.5.2-0.1.el7']) def test_with_single_release(self): \"\"\" when both release", "the origin version is in the old versioning schema \"\"\" self.assertEqual(determine_search_versions(\"origin\", \"1.5.0\"), (\"1.4\",", "\"\"\" test_pkgs = [\"origin-1.5.1-1.el7\", \"origin-1.5.0-1.el7\"] properly_sorted_pkgs = [\"origin-1.5.0-1.el7\", \"origin-1.5.1-1.el7\"] test_pkgs_obj = TestPackage.create_test_packages(test_pkgs) properly_sorted_pkgs_obj", "test_pkgs_objs, '1.5'), [\"1.5.0-1.1.el7\", \"1.5.2-1.1.el7\"]) def test_with_no_matches(self): test_pkgs = [\"origin-1.2.0-0.4.el7.x86_64\", \"origin-1.3.0-1.1.el7.x86_64\", \"origin-1.4.2-1.1.el7.x86_64\"] test_pkgs_objs =", "self.assertEqual(schema_change_check(\"origin\", \"3\", \"5\"), \"1.5\") def test_non_origin_package_with_new_schema(self): \"\"\" when origin package is in 3.6", "for `determine_install_upgrade_version.py`\" def test_origin_package_with_new_schema(self): \"\"\" when origin package is in 3.6 version \"\"\"", "= release self.epoch = epoch self.vra = vra self.pkgtup = pkgtup def __eq__(self,", "when there is no duplicate package, return the single one \"\"\" test_pkgs =", "versioning schema \"\"\" self.assertEqual(determine_search_versions(\"origin\", \"1.5.0\"), (\"1.4\", \"1.5\")) def test_origin_with_short_legacy_schema(self): \"\"\" when the origin", "new origin versioning schema - origin-3.6 \"\"\" self.assertEqual(determine_search_versions(\"origin\", \"3.6.0\"), (\"1.5\", \"3.6\")) def test_origin_with_short_standard_to_legacy_versioning_schema(self):", "versions \"\"\" matching_versions = [\"1.2.0-1.el7\", \"1.2.2-1.el7\", \"1.2.5-1.el7\"] install_version = \"1.2.5-1.el7\" self.assertEqual(get_last_version(matching_versions), install_version) def", "install_version) def test_with_single_matching_pre_release_version(self): \"\"\" when only single matching pre-release version is present in", "'1.5'), [\"1.5.0-1.1.el7\", \"1.5.2-1.1.el7\"]) def test_with_no_matches(self): test_pkgs = [\"origin-1.2.0-0.4.el7.x86_64\", \"origin-1.3.0-1.1.el7.x86_64\", \"origin-1.4.2-1.1.el7.x86_64\"] test_pkgs_objs = TestPackage.create_test_packages(test_pkgs)", "from determine_install_upgrade_version import * class TestPackage(object): def __init__(self, name, version, release, epoch, vra,", "= pkgtup def __eq__(self, other): return self.__dict__ == other.__dict__ @classmethod def create_test_packages(self, test_pkgs):", "determine_install_upgrade_version import * class 
TestPackage(object): def __init__(self, name, version, release, epoch, vra, pkgtup):", "\"3\", \"5\"), \"1.5\") def test_non_origin_package_with_new_schema(self): \"\"\" when origin package is in 3.6 version", "TestPackage.create_test_packages(test_pkgs) self.assertRaises(SystemExit, get_matching_versions, 'origin', test_pkgs_objs, '1.5') class DetermineSearchVersionTestCase(unittest.TestCase): \"Test for `determine_install_upgrade_version.py`\" def test_origin_with_standard_versioning_schema(self):", "\"6\"), \"3.6\") def test_non_origin_package_with_old_schema(self): \"\"\" when origin package is in 3.5 version \"\"\"", "other): return self.__dict__ == other.__dict__ @classmethod def create_test_packages(self, test_pkgs): test_pkgs_objs = [] for", "different minor version \"\"\" test_pkgs = [\"origin-1.5.1-1.el7\", \"origin-1.4.0-1.el7\"] properly_sorted_pkgs = [\"origin-1.4.0-1.el7\", \"origin-1.5.1-1.el7\"] test_pkgs_obj", "= [\"origin-1.5.1-1.el7\", \"origin-1.5.0-1.el7\"] properly_sorted_pkgs = [\"origin-1.5.0-1.el7\", \"origin-1.5.1-1.el7\"] test_pkgs_obj = TestPackage.create_test_packages(test_pkgs) properly_sorted_pkgs_obj = TestPackage.create_test_packages(properly_sorted_pkgs)", "test_origin_with_short_standard_versioning_schema(self): \"\"\" when the origin version is in short format and higher then", "is no duplicate package, return the single one \"\"\" test_pkgs = [\"origin-1.4.1-1.el7.x86_64\", \"origin-1.5.0-0.4.el7.x86_64\"]", "the new origin versioning schema - origin-3.6 \"\"\" self.assertEqual(determine_search_versions(\"origin\", \"3.6\"), (\"1.5\", \"3.6\")) def", "exist, only release version is returned \"\"\" test_pkgs = [\"origin-1.5.0-0.4.el7.x86_64\", \"origin-1.5.0-1.1.el7.x86_64\", \"origin-1.5.2-1.1.el7.x86_64\"] test_pkgs_objs", "\"\"\" when origin package is in 1.5 version \"\"\" self.assertEqual(schema_change_check(\"origin\", \"3\", \"5\"), \"1.5\")", "version are present in pre-released versions \"\"\" matching_versions = [\"1.2.0-0.el7\", \"1.2.2-0.el7\", \"1.2.5-0.el7\"] install_version", "\"\"\" self.assertEqual(schema_change_check(\"origin\", \"3\", \"6\"), \"3.6\") def test_origin_package_with_old_schema(self): \"\"\" when origin package is in", "in short format and in 3.6 version \"\"\" self.assertEqual(determine_search_versions(\"openshift-ansible\", \"3.6\"), (\"3.5\", \"3.6\")) def", "self.assertEqual(determine_search_versions(\"origin\", \"3.6.0\"), (\"1.5\", \"3.6\")) def test_origin_with_short_standard_to_legacy_versioning_schema(self): \"\"\" when the origin version is in", "= version self.release = release self.epoch = epoch self.vra = vra self.pkgtup =", "different versioning schema is in 3.6 version \"\"\" self.assertEqual(determine_search_versions(\"openshift-ansible\", \"3.6.0\"), (\"3.5\", \"3.6\")) def", "schema \"\"\" self.assertEqual(determine_search_versions(\"origin\", \"1.5.0\"), (\"1.4\", \"1.5\")) def test_origin_with_short_legacy_schema(self): \"\"\" when the origin version", "vra, pkgtup): self.name = name self.version = version self.release = release self.epoch =", "\"\"\" self.assertEqual(determine_search_versions(\"openshift-ansible\", \"3.5.0\"), (\"3.4\", \"3.5\")) class SchemaChangeCheckTestCase(unittest.TestCase): \"Test for `determine_install_upgrade_version.py`\" def test_origin_package_with_new_schema(self): \"\"\"", "when only one pre-release version exist, it is returned \"\"\" test_pkgs = [\"origin-1.5.0-0.4.el7.x86_64\",", "is the first from the new origin versioning schema - origin-3.6 \"\"\" 
self.assertEqual(determine_search_versions(\"origin\",", "\"3\", \"6\"), \"3.6\") def test_non_origin_package_with_old_schema(self): \"\"\" when origin package is in 3.5 version", "test_pkgs = [\"origin-3.6.0-0.0.alpha.0.1.el7\", \"origin-3.6.0-0.alpha.0.2.el7\"] properly_sorted_pkgs = [\"origin-3.6.0-0.alpha.0.2.el7\"] test_pkgs_obj = TestPackage.create_test_packages(test_pkgs) properly_sorted_pkgs_obj = TestPackage.create_test_packages(properly_sorted_pkgs)", "\"3\", \"5\"), \"3.5\") class GetLastVersionTestCase(unittest.TestCase): \"Test for `determine_install_upgrade_version.py`\" def test_with_multiple_matching_release_versions(self): \"\"\" when multiple", "single one \"\"\" test_pkgs = [\"origin-1.4.1-1.el7.x86_64\", \"origin-1.5.0-0.4.el7.x86_64\"] test_pkgs_objs = TestPackage.create_test_packages(test_pkgs) result_pkgs_objs = test_pkgs_objs[:2]", "one matching version exist and its pre-release, it is returned \"\"\" test_pkgs =", "are present in pre-released versions \"\"\" matching_versions = [\"1.2.0-0.el7\", \"1.2.2-0.el7\", \"1.2.5-0.el7\"] install_version =", "which doesnt have different versioning schema, is in short format and in 3.6", "from the new origin versioning schema - origin-3.6 \"\"\" self.assertEqual(determine_search_versions(\"origin\", \"3.6.0\"), (\"1.5\", \"3.6\"))", "def test_get_matching_versions(self): \"\"\" when only one matching version exist and its pre-release, it", "\"3.6\")) def test_origin_with_legacy_schema(self): \"\"\" when the origin version is in the old versioning", "packages, return only one \"\"\" test_pkgs = [\"origin-1.4.1-1.el7.x86_64\", \"origin-1.5.0-0.4.el7.x86_64\", \"origin-1.5.0-0.4.el7.x86_64\"] test_pkgs_objs = TestPackage.create_test_packages(test_pkgs)", "= vra self.pkgtup = pkgtup def __eq__(self, other): return self.__dict__ == other.__dict__ @classmethod", "\"1.2.5-1.el7\"] install_version = \"1.2.5-1.el7\" self.assertEqual(get_last_version(matching_versions), install_version) def test_with_single_matching_release_version(self): \"\"\" when only a single", "version is in the old versioning schema \"\"\" self.assertEqual(determine_search_versions(\"origin\", \"1.5.0\"), (\"1.4\", \"1.5\")) def", "for `determine_install_upgrade_version.py`\" def test_with_multiple_matching_release_versions(self): \"\"\" when multiple matching version are present in released", "unittest from determine_install_upgrade_version import * class TestPackage(object): def __init__(self, name, version, release, epoch,", "duplicate package, return the single one \"\"\" test_pkgs = [\"origin-1.4.1-1.el7.x86_64\", \"origin-1.5.0-0.4.el7.x86_64\"] test_pkgs_objs =", "test_pkgs_objs = TestPackage.create_test_packages(test_pkgs) self.assertEqual(get_matching_versions('origin', test_pkgs_objs, '1.5'), ['1.5.0-0.4.el7']) def test_with_multiple_pre_release(self): \"\"\" when only one", "in 1.5 version \"\"\" self.assertEqual(schema_change_check(\"origin\", \"3\", \"5\"), \"1.5\") def test_non_origin_package_with_new_schema(self): \"\"\" when origin", "\"\"\" self.assertEqual(determine_search_versions(\"openshift-ansible\", \"3.7.0\"), (\"3.6\", \"3.7\")) def test_openshift_ansible_with_standard_to_legacy_versioning_schema(self): \"\"\" when openshift-ansible, which doesnt have", "def test_with_single_release(self): \"\"\" when both release and pre-release versions exist, only release versions", "TestPackage(object): def __init__(self, name, version, release, epoch, vra, pkgtup): self.name = name self.version", "'origin', test_pkgs_objs, '1.5') class 
DetermineSearchVersionTestCase(unittest.TestCase): \"Test for `determine_install_upgrade_version.py`\" def test_origin_with_standard_versioning_schema(self): \"\"\" when the", "\"1.2.5-1.el7\" self.assertEqual(get_last_version(matching_versions), install_version) def test_with_single_matching_release_version(self): \"\"\" when only a single matching version is", "import unittest from determine_install_upgrade_version import * class TestPackage(object): def __init__(self, name, version, release,", "when multiple matching version are present in released versions \"\"\" matching_versions = [\"1.2.0-1.el7\",", "in 3.6 version \"\"\" self.assertEqual(determine_search_versions(\"openshift-ansible\", \"3.6.0\"), (\"3.5\", \"3.6\")) def test_openshift_ansible_with_short_standard_to_legacy_versioning_schema(self): \"\"\" when openshift-ansible,", "version is in short and old versioning schema \"\"\" self.assertEqual(determine_search_versions(\"origin\", \"1.5\"), (\"1.4\", \"1.5\"))", "have different versioning schema, is in short format and in 3.6 version \"\"\"", "return the single one \"\"\" test_pkgs = [\"origin-1.4.1-1.el7.x86_64\", \"origin-1.5.0-0.4.el7.x86_64\"] test_pkgs_objs = TestPackage.create_test_packages(test_pkgs) result_pkgs_objs", "it is returned \"\"\" test_pkgs = [\"origin-1.5.0-0.4.el7.x86_64\", \"origin-1.5.2-0.1.el7.x86_64\"] test_pkgs_objs = TestPackage.create_test_packages(test_pkgs) self.assertEqual(get_matching_versions('origin', test_pkgs_objs,", "def __eq__(self, other): return self.__dict__ == other.__dict__ @classmethod def create_test_packages(self, test_pkgs): test_pkgs_objs =", "exist, it is returned \"\"\" test_pkgs = [\"origin-1.5.0-0.4.el7.x86_64\", \"origin-1.5.2-0.1.el7.x86_64\"] test_pkgs_objs = TestPackage.create_test_packages(test_pkgs) self.assertEqual(get_matching_versions('origin',", "\"\"\" self.assertEqual(determine_search_versions(\"origin\", \"3.6\"), (\"1.5\", \"3.6\")) def test_origin_with_legacy_schema(self): \"\"\" when the origin version is", "version of the new origin versioning schema - origin-3.6 \"\"\" self.assertEqual(determine_search_versions(\"origin\", \"3.7\"), (\"3.6\",", "when openshift-ansible, which doesnt have different versioning schema is in 3.4 version \"\"\"", "pkg_release, pkg_epoch, pkg_vra, pkg_tup)) return test_pkgs_objs class RemoveDuplicatePackages(unittest.TestCase): \"Test for `determine_install_upgrade_version.py`\" def test_removing_single_duplicate_package(self):", "\"\"\" test_pkgs = [\"origin-1.5.0-0.4.el7.x86_64\", \"origin-1.5.0-1.1.el7.x86_64\"] test_pkgs_objs = TestPackage.create_test_packages(test_pkgs) self.assertEqual(get_matching_versions('origin', test_pkgs_objs, '1.5'), [\"1.5.0-1.1.el7\"]) def", "properly_sorted_pkgs = [\"origin-1.5.0-1.el7\", \"origin-1.5.1-1.el7\"] test_pkgs_obj = TestPackage.create_test_packages(test_pkgs) properly_sorted_pkgs_obj = TestPackage.create_test_packages(properly_sorted_pkgs) sorted_test_pkgs_obj = sort_pkgs(test_pkgs_obj)", "self.assertEqual(get_matching_versions('origin', test_pkgs_objs, '1.5'), ['1.5.0-0.4.el7']) def test_with_single_pre_release(self): \"\"\" when only one pre-release version exist,", "\"3.6\"), (\"3.5\", \"3.6\")) def test_openshift_ansible_with_legacy_versioning_schema(self): \"\"\" when openshift-ansible, which doesnt have different versioning", "\"\"\" when sorting origin packages with different minor version \"\"\" test_pkgs = [\"origin-1.5.1-1.el7\",", "test_pkgs = [\"origin-1.4.1-1.el7.x86_64\", \"origin-1.5.0-0.4.el7.x86_64\", 
\"origin-1.5.0-0.4.el7.x86_64\"] test_pkgs_objs = TestPackage.create_test_packages(test_pkgs) result_pkgs_objs = test_pkgs_objs[:2] self.assertEqual(remove_duplicate_pkgs(test_pkgs_objs), result_pkgs_objs)", "is in short format and first from the new origin versioning schema -", "is in 3.6 version \"\"\" self.assertEqual(schema_change_check(\"origin\", \"3\", \"6\"), \"3.6\") def test_origin_package_with_old_schema(self): \"\"\" when", "multiple duplicate packages, return only one \"\"\" test_pkgs = [\"origin-1.4.1-1.el7.x86_64\", \"origin-1.5.0-0.4.el7.x86_64\", \"origin-1.5.0-0.4.el7.x86_64\"] test_pkgs_objs", "test_pkgs_objs, '1.5'), ['1.5.0-0.4.el7']) def test_with_multiple_pre_release(self): \"\"\" when only one pre-release version exist, it", "version \"\"\" self.assertEqual(schema_change_check(\"openshift-ansible\", \"3\", \"5\"), \"3.5\") class GetLastVersionTestCase(unittest.TestCase): \"Test for `determine_install_upgrade_version.py`\" def test_with_multiple_matching_release_versions(self):", "DetermineSearchVersionTestCase(unittest.TestCase): \"Test for `determine_install_upgrade_version.py`\" def test_origin_with_standard_versioning_schema(self): \"\"\" when the origin version is higher", "epoch self.vra = vra self.pkgtup = pkgtup def __eq__(self, other): return self.__dict__ ==", "\"\"\" when the origin version is in the old versioning schema \"\"\" self.assertEqual(determine_search_versions(\"origin\",", "self.assertRaises(SystemExit, get_matching_versions, 'origin', test_pkgs_objs, '1.5') class DetermineSearchVersionTestCase(unittest.TestCase): \"Test for `determine_install_upgrade_version.py`\" def test_origin_with_standard_versioning_schema(self): \"\"\"", "3.7 version \"\"\" self.assertEqual(determine_search_versions(\"openshift-ansible\", \"3.7.0\"), (\"3.6\", \"3.7\")) def test_openshift_ansible_with_standard_to_legacy_versioning_schema(self): \"\"\" when openshift-ansible, which", "= TestPackage.create_test_packages(properly_sorted_pkgs) sorted_test_pkgs_obj = sort_pkgs(test_pkgs_obj) self.assertEqual(sorted_test_pkgs_obj, properly_sorted_pkgs_obj) def test_sort_packages_with_same_minor_version(self): \"\"\" when sorting origin", "\"\"\" when the origin version is in short format and higher then the", "= [\"origin-1.5.0-0.4.el7.x86_64\", \"origin-1.5.0-1.1.el7.x86_64\", \"origin-1.5.2-1.1.el7.x86_64\"] test_pkgs_objs = TestPackage.create_test_packages(test_pkgs) self.assertEqual(get_matching_versions('origin', test_pkgs_objs, '1.5'), [\"1.5.0-1.1.el7\", \"1.5.2-1.1.el7\"]) def", "new origin versioning schema - origin-3.6 \"\"\" self.assertEqual(determine_search_versions(\"origin\", \"3.6\"), (\"1.5\", \"3.6\")) def test_origin_with_legacy_schema(self):", "openshift-ansible, which doesnt have different versioning schema, is in short format and in", "is in short format and higher then the first version of the new", "duplicate packages, return only one \"\"\" test_pkgs = [\"origin-1.4.1-1.el7.x86_64\", \"origin-1.5.0-0.4.el7.x86_64\", \"origin-1.5.0-0.4.el7.x86_64\"] test_pkgs_objs =", "create_test_packages(self, test_pkgs): test_pkgs_objs = [] for pkg in test_pkgs: pkg_name, pkg_version, pkg_release, pkg_epoch,", "when the origin version is in short format and first from the new", "test_pkgs_objs = [] for pkg in test_pkgs: pkg_name, pkg_version, pkg_release, pkg_epoch, pkg_arch =", "and first from the new origin versioning schema - origin-3.6 \"\"\" self.assertEqual(determine_search_versions(\"origin\", \"3.6\"),", "def 
test_origin_with_short_standard_to_legacy_versioning_schema(self): \"\"\" when the origin version is in short format and first", "def test_with_single_matching_release_version(self): \"\"\" when only a single matching version is present in released", "def test_non_origin_package_with_old_schema(self): \"\"\" when origin package is in 3.5 version \"\"\" self.assertEqual(schema_change_check(\"openshift-ansible\", \"3\",", "'1.5'), ['1.5.0-0.4.el7']) def test_with_single_pre_release(self): \"\"\" when only one pre-release version exist, it is", "release, epoch, vra, pkgtup): self.name = name self.version = version self.release = release", "\"origin-1.4.2-1.1.el7.x86_64\"] test_pkgs_objs = TestPackage.create_test_packages(test_pkgs) self.assertRaises(SystemExit, get_matching_versions, 'origin', test_pkgs_objs, '1.5') class DetermineSearchVersionTestCase(unittest.TestCase): \"Test for", "origin version is in short format and higher then the first version of", "pkg_release) test_pkgs_objs.append(TestPackage(pkg_name, pkg_version, pkg_release, pkg_epoch, pkg_vra, pkg_tup)) return test_pkgs_objs class RemoveDuplicatePackages(unittest.TestCase): \"Test for", "minor version \"\"\" test_pkgs = [\"origin-1.5.1-1.el7\", \"origin-1.5.0-1.el7\"] properly_sorted_pkgs = [\"origin-1.5.0-1.el7\", \"origin-1.5.1-1.el7\"] test_pkgs_obj =", ", pkg_arch, pkg_epoch, pkg_version, pkg_release) test_pkgs_objs.append(TestPackage(pkg_name, pkg_version, pkg_release, pkg_epoch, pkg_vra, pkg_tup)) return test_pkgs_objs", "version \"\"\" test_pkgs = [\"origin-1.5.1-1.el7\", \"origin-1.5.0-1.el7\"] properly_sorted_pkgs = [\"origin-1.5.0-1.el7\", \"origin-1.5.1-1.el7\"] test_pkgs_obj = TestPackage.create_test_packages(test_pkgs)", "\"origin-1.5.2-0.1.el7.x86_64\"] test_pkgs_objs = TestPackage.create_test_packages(test_pkgs) self.assertEqual(get_matching_versions('origin', test_pkgs_objs, '1.5'), ['1.5.0-0.4.el7', '1.5.2-0.1.el7']) def test_with_single_release(self): \"\"\" when", "versions are returned \"\"\" test_pkgs = [\"origin-1.5.0-0.4.el7.x86_64\", \"origin-1.5.0-1.1.el7.x86_64\"] test_pkgs_objs = TestPackage.create_test_packages(test_pkgs) self.assertEqual(get_matching_versions('origin', test_pkgs_objs,", "test_openshift_ansible_with_legacy_versioning_schema(self): \"\"\" when openshift-ansible, which doesnt have different versioning schema is in 3.4", "exceptional origin-3.6.0-0.0.alpha.0.1 package \"\"\" test_pkgs = [\"origin-3.6.0-0.0.alpha.0.1.el7\", \"origin-3.6.0-0.alpha.0.2.el7\"] properly_sorted_pkgs = [\"origin-3.6.0-0.alpha.0.2.el7\"] test_pkgs_obj =", "= TestPackage.create_test_packages(test_pkgs) properly_sorted_pkgs_obj = TestPackage.create_test_packages(properly_sorted_pkgs) sorted_test_pkgs_obj = sort_pkgs(test_pkgs_obj) self.assertEqual(sorted_test_pkgs_obj, properly_sorted_pkgs_obj) def test_sort_packages_with_same_minor_version(self): \"\"\"", "short format and in 3.6 version \"\"\" self.assertEqual(determine_search_versions(\"openshift-ansible\", \"3.6\"), (\"3.5\", \"3.6\")) def test_openshift_ansible_with_legacy_versioning_schema(self):", "= [\"origin-3.6.0-0.alpha.0.2.el7\"] test_pkgs_obj = TestPackage.create_test_packages(test_pkgs) properly_sorted_pkgs_obj = TestPackage.create_test_packages(properly_sorted_pkgs) sorted_test_pkgs_obj = sort_pkgs(test_pkgs_obj) self.assertEqual(sorted_test_pkgs_obj, properly_sorted_pkgs_obj)", "self.name = name self.version = version self.release = release self.epoch = epoch self.vra", "@classmethod def create_test_packages(self, test_pkgs): test_pkgs_objs = [] 
for pkg in test_pkgs: pkg_name, pkg_version,", "for `determine_install_upgrade_version.py`\" def test_removing_single_duplicate_package(self): \"\"\" when is multiple duplicate packages, return only one", "matching version are present in released versions \"\"\" matching_versions = [\"1.2.0-1.el7\", \"1.2.2-1.el7\", \"1.2.5-1.el7\"]", "TestPackage.create_test_packages(test_pkgs) properly_sorted_pkgs_obj = TestPackage.create_test_packages(properly_sorted_pkgs) sorted_test_pkgs_obj = sort_pkgs(test_pkgs_obj) self.assertEqual(sorted_test_pkgs_obj, properly_sorted_pkgs_obj) def test_sort_packages_with_different_minor_version(self): \"\"\" when", "version \"\"\" test_pkgs = [\"origin-1.5.1-1.el7\", \"origin-1.4.0-1.el7\"] properly_sorted_pkgs = [\"origin-1.4.0-1.el7\", \"origin-1.5.1-1.el7\"] test_pkgs_obj = TestPackage.create_test_packages(test_pkgs)", "= [\"origin-1.4.1-1.el7.x86_64\", \"origin-1.5.0-0.4.el7.x86_64\"] test_pkgs_objs = TestPackage.create_test_packages(test_pkgs) result_pkgs_objs = test_pkgs_objs[:2] self.assertEqual(remove_duplicate_pkgs(test_pkgs_objs), result_pkgs_objs) class GetMatchingVersionTestCase(unittest.TestCase):", "__eq__(self, other): return self.__dict__ == other.__dict__ @classmethod def create_test_packages(self, test_pkgs): test_pkgs_objs = []", "sorted_test_pkgs_obj = sort_pkgs(test_pkgs_obj) self.assertEqual(sorted_test_pkgs_obj, properly_sorted_pkgs_obj) def test_sort_packages_with_same_minor_version(self): \"\"\" when sorting origin packages within", "versions \"\"\" matching_versions = [\"1.2.0-0.el7\", \"1.2.2-0.el7\", \"1.2.5-0.el7\"] install_version = \"1.2.5-0.el7\" self.assertEqual(get_last_version(matching_versions), install_version) def", "when sorting origin packages with different minor version \"\"\" test_pkgs = [\"origin-1.5.1-1.el7\", \"origin-1.4.0-1.el7\"]", "schema - origin-3.6 \"\"\" self.assertEqual(determine_search_versions(\"origin\", \"3.7.0\"), (\"3.6\", \"3.7\")) def test_origin_with_short_standard_versioning_schema(self): \"\"\" when the", "`determine_install_upgrade_version.py`\" def test_origin_with_standard_versioning_schema(self): \"\"\" when the origin version is higher then the first", "when openshift-ansible, which doesnt have different versioning schema, is in 3.7 version \"\"\"", "[\"origin-1.5.0-0.4.el7.x86_64\"] test_pkgs_objs = TestPackage.create_test_packages(test_pkgs) self.assertEqual(get_matching_versions('origin', test_pkgs_objs, '1.5'), ['1.5.0-0.4.el7']) def test_with_multiple_pre_release(self): \"\"\" when only", "install_version = \"1.5.0-0.4.el7\" self.assertEqual(get_last_version(matching_versions), install_version) class SortPackagesTestCase(unittest.TestCase): \"Test for `determine_install_upgrade_version.py`\" def test_sort_packages_with_exceptional_origin_pkg(self): \"\"\"", "(pkg_name , pkg_arch, pkg_epoch, pkg_version, pkg_release) test_pkgs_objs.append(TestPackage(pkg_name, pkg_version, pkg_release, pkg_epoch, pkg_vra, pkg_tup)) return", "\"1.5\")) def test_origin_with_short_legacy_schema(self): \"\"\" when the origin version is in short and old", "3.6 version \"\"\" self.assertEqual(schema_change_check(\"origin\", \"3\", \"6\"), \"3.6\") def test_origin_package_with_old_schema(self): \"\"\" when origin package", "= sort_pkgs(test_pkgs_obj) self.assertEqual(sorted_test_pkgs_obj, properly_sorted_pkgs_obj) def test_sort_packages_with_same_minor_version(self): \"\"\" when sorting origin packages within the", "origin packages with exceptional origin-3.6.0-0.0.alpha.0.1 package \"\"\" test_pkgs = 
[\"origin-3.6.0-0.0.alpha.0.1.el7\", \"origin-3.6.0-0.alpha.0.2.el7\"] properly_sorted_pkgs =", "sorting origin packages with exceptional origin-3.6.0-0.0.alpha.0.1 package \"\"\" test_pkgs = [\"origin-3.6.0-0.0.alpha.0.1.el7\", \"origin-3.6.0-0.alpha.0.2.el7\"] properly_sorted_pkgs", "\"\"\" test_pkgs = [\"origin-1.5.0-0.4.el7.x86_64\"] test_pkgs_objs = TestPackage.create_test_packages(test_pkgs) self.assertEqual(get_matching_versions('origin', test_pkgs_objs, '1.5'), ['1.5.0-0.4.el7']) def test_with_multiple_pre_release(self):", "package is in 3.5 version \"\"\" self.assertEqual(schema_change_check(\"openshift-ansible\", \"3\", \"5\"), \"3.5\") class GetLastVersionTestCase(unittest.TestCase): \"Test", "\"\"\" when origin package is in 3.6 version \"\"\" self.assertEqual(schema_change_check(\"openshift-ansible\", \"3\", \"6\"), \"3.6\")", "test_openshift_ansible_with_short_standard_to_legacy_versioning_schema(self): \"\"\" when openshift-ansible, which doesnt have different versioning schema, is in short", "when the origin version is higher then the first version of the new", "exist, it is returned \"\"\" test_pkgs = [\"origin-1.5.0-0.4.el7.x86_64\"] test_pkgs_objs = TestPackage.create_test_packages(test_pkgs) self.assertEqual(get_matching_versions('origin', test_pkgs_objs,", "test_with_muptiple_release(self): \"\"\" when both release and pre-release versions exist, only release version is", "\"3\", \"6\"), \"3.6\") def test_origin_package_with_old_schema(self): \"\"\" when origin package is in 1.5 version", "\"\"\" test_pkgs = [\"origin-3.6.0-0.0.alpha.0.1.el7\", \"origin-3.6.0-0.alpha.0.2.el7\"] properly_sorted_pkgs = [\"origin-3.6.0-0.alpha.0.2.el7\"] test_pkgs_obj = TestPackage.create_test_packages(test_pkgs) properly_sorted_pkgs_obj =", "\"\"\" self.assertEqual(schema_change_check(\"openshift-ansible\", \"3\", \"5\"), \"3.5\") class GetLastVersionTestCase(unittest.TestCase): \"Test for `determine_install_upgrade_version.py`\" def test_with_multiple_matching_release_versions(self): \"\"\"", "test_sort_packages_with_different_minor_version(self): \"\"\" when sorting origin packages with different minor version \"\"\" test_pkgs =", "\"3.7\")) def test_origin_with_standard_to_legacy_versioning_schema(self): \"\"\" when the origin version is the first from the", "multiple matching version are present in released versions \"\"\" matching_versions = [\"1.2.0-1.el7\", \"1.2.2-1.el7\",", "minor version \"\"\" test_pkgs = [\"origin-1.5.1-1.el7\", \"origin-1.4.0-1.el7\"] properly_sorted_pkgs = [\"origin-1.4.0-1.el7\", \"origin-1.5.1-1.el7\"] test_pkgs_obj =", "\"\"\" test_pkgs = [\"origin-1.5.0-0.4.el7.x86_64\", \"origin-1.5.0-1.1.el7.x86_64\", \"origin-1.5.2-1.1.el7.x86_64\"] test_pkgs_objs = TestPackage.create_test_packages(test_pkgs) self.assertEqual(get_matching_versions('origin', test_pkgs_objs, '1.5'), [\"1.5.0-1.1.el7\",", "def test_openshift_ansible_with_standard_versioning_schema(self): \"\"\" when openshift-ansible, which doesnt have different versioning schema, is in", "= [\"origin-1.4.1-1.el7.x86_64\", \"origin-1.5.0-0.4.el7.x86_64\"] test_pkgs_objs = TestPackage.create_test_packages(test_pkgs) self.assertEqual(get_matching_versions('origin', test_pkgs_objs, '1.5'), ['1.5.0-0.4.el7']) def test_with_single_pre_release(self): \"\"\"", "when sorting origin packages within the same minor version \"\"\" test_pkgs = [\"origin-1.5.1-1.el7\",", "TestPackage.create_test_packages(test_pkgs) properly_sorted_pkgs_obj = TestPackage.create_test_packages(properly_sorted_pkgs) sorted_test_pkgs_obj 
= sort_pkgs(test_pkgs_obj) self.assertEqual(sorted_test_pkgs_obj, properly_sorted_pkgs_obj) if __name__ == '__main__':", "version \"\"\" self.assertEqual(determine_search_versions(\"openshift-ansible\", \"3.6.0\"), (\"3.5\", \"3.6\")) def test_openshift_ansible_with_short_standard_to_legacy_versioning_schema(self): \"\"\" when openshift-ansible, which doesnt", "def test_origin_with_short_legacy_schema(self): \"\"\" when the origin version is in short and old versioning", "test_pkgs = [\"origin-1.4.1-1.el7.x86_64\", \"origin-1.5.0-0.4.el7.x86_64\"] test_pkgs_objs = TestPackage.create_test_packages(test_pkgs) self.assertEqual(get_matching_versions('origin', test_pkgs_objs, '1.5'), ['1.5.0-0.4.el7']) def test_with_single_pre_release(self):", "\"\"\" when only one pre-release version exist, it is returned \"\"\" test_pkgs =", "which doesnt have different versioning schema, is in 3.7 version \"\"\" self.assertEqual(determine_search_versions(\"openshift-ansible\", \"3.7.0\"),", "\"\"\" when sorting origin packages with exceptional origin-3.6.0-0.0.alpha.0.1 package \"\"\" test_pkgs = [\"origin-3.6.0-0.0.alpha.0.1.el7\",", "test_removing_single_duplicate_package(self): \"\"\" when is multiple duplicate packages, return only one \"\"\" test_pkgs =", "version of the new origin versioning schema - origin-3.6 \"\"\" self.assertEqual(determine_search_versions(\"origin\", \"3.7.0\"), (\"3.6\",", "pre-released versions \"\"\" matching_versions = [\"1.2.0-0.el7\", \"1.2.2-0.el7\", \"1.2.5-0.el7\"] install_version = \"1.2.5-0.el7\" self.assertEqual(get_last_version(matching_versions), install_version)", "returned \"\"\" test_pkgs = [\"origin-1.5.0-0.4.el7.x86_64\", \"origin-1.5.0-1.1.el7.x86_64\", \"origin-1.5.2-1.1.el7.x86_64\"] test_pkgs_objs = TestPackage.create_test_packages(test_pkgs) self.assertEqual(get_matching_versions('origin', test_pkgs_objs, '1.5'),", "result_pkgs_objs = test_pkgs_objs[:2] self.assertEqual(remove_duplicate_pkgs(test_pkgs_objs), result_pkgs_objs) class GetMatchingVersionTestCase(unittest.TestCase): \"Test for `determine_install_upgrade_version.py`\" def test_get_matching_versions(self): \"\"\"", "= [\"origin-1.5.0-0.4.el7.x86_64\"] test_pkgs_objs = TestPackage.create_test_packages(test_pkgs) self.assertEqual(get_matching_versions('origin', test_pkgs_objs, '1.5'), ['1.5.0-0.4.el7']) def test_with_multiple_pre_release(self): \"\"\" when", "format and in 3.6 version \"\"\" self.assertEqual(determine_search_versions(\"openshift-ansible\", \"3.6\"), (\"3.5\", \"3.6\")) def test_openshift_ansible_with_legacy_versioning_schema(self): \"\"\"", "in 3.7 version \"\"\" self.assertEqual(determine_search_versions(\"openshift-ansible\", \"3.7.0\"), (\"3.6\", \"3.7\")) def test_openshift_ansible_with_standard_to_legacy_versioning_schema(self): \"\"\" when openshift-ansible,", "in released versions \"\"\" matching_versions = [\"1.5.0-1.4.el7\"] install_version = \"1.5.0-1.4.el7\" self.assertEqual(get_last_version(matching_versions), install_version) def", "schema \"\"\" self.assertEqual(determine_search_versions(\"origin\", \"1.5\"), (\"1.4\", \"1.5\")) def test_openshift_ansible_with_standard_versioning_schema(self): \"\"\" when openshift-ansible, which doesnt", "[\"origin-1.5.0-0.4.el7.x86_64\", \"origin-1.5.0-1.1.el7.x86_64\", \"origin-1.5.2-1.1.el7.x86_64\"] test_pkgs_objs = TestPackage.create_test_packages(test_pkgs) self.assertEqual(get_matching_versions('origin', test_pkgs_objs, '1.5'), [\"1.5.0-1.1.el7\", \"1.5.2-1.1.el7\"]) def test_with_no_matches(self):", 
"test_origin_package_with_new_schema(self): \"\"\" when origin package is in 3.6 version \"\"\" self.assertEqual(schema_change_check(\"origin\", \"3\", \"6\"),", "released versions \"\"\" matching_versions = [\"1.2.0-1.el7\", \"1.2.2-1.el7\", \"1.2.5-1.el7\"] install_version = \"1.2.5-1.el7\" self.assertEqual(get_last_version(matching_versions), install_version)", "install_version) def test_with_single_matching_release_version(self): \"\"\" when only a single matching version is present in", "install_version = \"1.2.5-1.el7\" self.assertEqual(get_last_version(matching_versions), install_version) def test_with_single_matching_release_version(self): \"\"\" when only a single matching", "def test_with_multiple_matching_release_versions(self): \"\"\" when multiple matching version are present in released versions \"\"\"", "multiple matching pre-release version are present in pre-released versions \"\"\" matching_versions = [\"1.2.0-0.el7\",", "def test_origin_with_standard_versioning_schema(self): \"\"\" when the origin version is higher then the first version", "\"origin-1.4.0-1.el7\"] properly_sorted_pkgs = [\"origin-1.4.0-1.el7\", \"origin-1.5.1-1.el7\"] test_pkgs_obj = TestPackage.create_test_packages(test_pkgs) properly_sorted_pkgs_obj = TestPackage.create_test_packages(properly_sorted_pkgs) sorted_test_pkgs_obj =", "is present in pre-released versions \"\"\" matching_versions = [\"1.5.0-0.4.el7\"] install_version = \"1.5.0-0.4.el7\" self.assertEqual(get_last_version(matching_versions),", "are present in released versions \"\"\" matching_versions = [\"1.2.0-1.el7\", \"1.2.2-1.el7\", \"1.2.5-1.el7\"] install_version =", "\"\"\" when only a single matching version is present in released versions \"\"\"", "its pre-release, it is returned \"\"\" test_pkgs = [\"origin-1.4.1-1.el7.x86_64\", \"origin-1.5.0-0.4.el7.x86_64\"] test_pkgs_objs = TestPackage.create_test_packages(test_pkgs)", "pkg_version, pkg_release, pkg_epoch, pkg_vra, pkg_tup)) return test_pkgs_objs class RemoveDuplicatePackages(unittest.TestCase): \"Test for `determine_install_upgrade_version.py`\" def", "matching pre-release version is present in pre-released versions \"\"\" matching_versions = [\"1.5.0-0.4.el7\"] install_version", "present in pre-released versions \"\"\" matching_versions = [\"1.2.0-0.el7\", \"1.2.2-0.el7\", \"1.2.5-0.el7\"] install_version = \"1.2.5-0.el7\"", "SortPackagesTestCase(unittest.TestCase): \"Test for `determine_install_upgrade_version.py`\" def test_sort_packages_with_exceptional_origin_pkg(self): \"\"\" when sorting origin packages with exceptional", "[\"origin-1.4.0-1.el7\", \"origin-1.5.1-1.el7\"] test_pkgs_obj = TestPackage.create_test_packages(test_pkgs) properly_sorted_pkgs_obj = TestPackage.create_test_packages(properly_sorted_pkgs) sorted_test_pkgs_obj = sort_pkgs(test_pkgs_obj) self.assertEqual(sorted_test_pkgs_obj, properly_sorted_pkgs_obj)", "self.assertEqual(sorted_test_pkgs_obj, properly_sorted_pkgs_obj) def test_sort_packages_with_same_minor_version(self): \"\"\" when sorting origin packages within the same minor", "3.6 version \"\"\" self.assertEqual(determine_search_versions(\"openshift-ansible\", \"3.6.0\"), (\"3.5\", \"3.6\")) def test_openshift_ansible_with_short_standard_to_legacy_versioning_schema(self): \"\"\" when openshift-ansible, which", "the origin version is the first from the new origin versioning schema -", "is in 1.5 version \"\"\" self.assertEqual(schema_change_check(\"origin\", \"3\", \"5\"), \"1.5\") def test_non_origin_package_with_new_schema(self): \"\"\" when", 
"returned \"\"\" test_pkgs = [\"origin-1.5.0-0.4.el7.x86_64\"] test_pkgs_objs = TestPackage.create_test_packages(test_pkgs) self.assertEqual(get_matching_versions('origin', test_pkgs_objs, '1.5'), ['1.5.0-0.4.el7']) def", "= [\"1.2.0-0.el7\", \"1.2.2-0.el7\", \"1.2.5-0.el7\"] install_version = \"1.2.5-0.el7\" self.assertEqual(get_last_version(matching_versions), install_version) def test_with_single_matching_pre_release_version(self): \"\"\" when", "TestPackage.create_test_packages(test_pkgs) self.assertEqual(get_matching_versions('origin', test_pkgs_objs, '1.5'), ['1.5.0-0.4.el7']) def test_with_single_pre_release(self): \"\"\" when only one pre-release version", "pkgtup def __eq__(self, other): return self.__dict__ == other.__dict__ @classmethod def create_test_packages(self, test_pkgs): test_pkgs_objs", "(\"1.4\", \"1.5\")) def test_openshift_ansible_with_standard_versioning_schema(self): \"\"\" when openshift-ansible, which doesnt have different versioning schema,", "first from the new origin versioning schema - origin-3.6 \"\"\" self.assertEqual(determine_search_versions(\"origin\", \"3.6\"), (\"1.5\",", "which doesnt have different versioning schema is in 3.4 version \"\"\" self.assertEqual(determine_search_versions(\"openshift-ansible\", \"3.5.0\"),", "= pkg_version + \"-\" + pkg_release + \".\" + pkg_arch pkg_tup = (pkg_name", "test_pkgs_objs[:2] self.assertEqual(remove_duplicate_pkgs(test_pkgs_objs), result_pkgs_objs) class GetMatchingVersionTestCase(unittest.TestCase): \"Test for `determine_install_upgrade_version.py`\" def test_get_matching_versions(self): \"\"\" when only", "pkg_version, pkg_release, pkg_epoch, pkg_arch = rpmutils.splitFilename(pkg) pkg_vra = pkg_version + \"-\" + pkg_release", "\"3.7\")) def test_origin_with_short_standard_versioning_schema(self): \"\"\" when the origin version is in short format and", "versions exist, only release versions are returned \"\"\" test_pkgs = [\"origin-1.5.0-0.4.el7.x86_64\", \"origin-1.5.0-1.1.el7.x86_64\"] test_pkgs_objs", "class SortPackagesTestCase(unittest.TestCase): \"Test for `determine_install_upgrade_version.py`\" def test_sort_packages_with_exceptional_origin_pkg(self): \"\"\" when sorting origin packages with", "have different versioning schema, is in 3.7 version \"\"\" self.assertEqual(determine_search_versions(\"openshift-ansible\", \"3.7.0\"), (\"3.6\", \"3.7\"))", "\"\"\" when openshift-ansible, which doesnt have different versioning schema is in 3.6 version", "self.assertEqual(determine_search_versions(\"openshift-ansible\", \"3.6.0\"), (\"3.5\", \"3.6\")) def test_openshift_ansible_with_short_standard_to_legacy_versioning_schema(self): \"\"\" when openshift-ansible, which doesnt have different", "\"3.6\")) def test_openshift_ansible_with_short_standard_to_legacy_versioning_schema(self): \"\"\" when openshift-ansible, which doesnt have different versioning schema, is", "versioning schema - origin-3.6 \"\"\" self.assertEqual(determine_search_versions(\"origin\", \"3.6.0\"), (\"1.5\", \"3.6\")) def test_origin_with_short_standard_to_legacy_versioning_schema(self): \"\"\" when", "\"1.5\")) def test_openshift_ansible_with_standard_versioning_schema(self): \"\"\" when openshift-ansible, which doesnt have different versioning schema, is", "\"1.5\"), (\"1.4\", \"1.5\")) def test_openshift_ansible_with_standard_versioning_schema(self): \"\"\" when openshift-ansible, which doesnt have different versioning", "properly_sorted_pkgs = [\"origin-1.4.0-1.el7\", \"origin-1.5.1-1.el7\"] test_pkgs_obj = 
# Tests for determine_install_upgrade_version.py.
# NOTE: the three import lines below are assumptions inferred from how the names are
# used in the tests (unittest.TestCase, rpmutils.splitFilename, and the helpers under
# test); only "import *" is visible in the fragments, the module paths are guesses.
import unittest
import rpmUtils.miscutils as rpmutils
from determine_install_upgrade_version import *


class TestPackage(object):
    def __init__(self, name, version, release, epoch, vra, pkgtup):
        self.name = name
        self.version = version
        self.release = release
        self.epoch = epoch
        self.vra = vra
        self.pkgtup = pkgtup

    def __eq__(self, other):
        return self.__dict__ == other.__dict__

    @classmethod
    def create_test_packages(self, test_pkgs):
        test_pkgs_objs = []
        for pkg in test_pkgs:
            pkg_name, pkg_version, pkg_release, pkg_epoch, pkg_arch = rpmutils.splitFilename(pkg)
            pkg_vra = pkg_version + "-" + pkg_release + "." + pkg_arch
            pkg_tup = (pkg_name, pkg_arch, pkg_epoch, pkg_version, pkg_release)
            test_pkgs_objs.append(TestPackage(pkg_name, pkg_version, pkg_release, pkg_epoch, pkg_vra, pkg_tup))
        return test_pkgs_objs


class RemoveDuplicatePackages(unittest.TestCase):
    "Test for `determine_install_upgrade_version.py`"

    def test_removing_single_duplicate_package(self):
        """ when is multiple duplicate packages, return only one """
        test_pkgs = ["origin-1.4.1-1.el7.x86_64", "origin-1.5.0-0.4.el7.x86_64", "origin-1.5.0-0.4.el7.x86_64"]
        test_pkgs_objs = TestPackage.create_test_packages(test_pkgs)
        result_pkgs_objs = test_pkgs_objs[:2]
        self.assertEqual(remove_duplicate_pkgs(test_pkgs_objs), result_pkgs_objs)

    def test_removing_no_duplicate_package(self):
        """ when there is no duplicate package, return the single one """
        test_pkgs = ["origin-1.4.1-1.el7.x86_64", "origin-1.5.0-0.4.el7.x86_64"]
        test_pkgs_objs = TestPackage.create_test_packages(test_pkgs)
        result_pkgs_objs = test_pkgs_objs[:2]
        self.assertEqual(remove_duplicate_pkgs(test_pkgs_objs), result_pkgs_objs)


class GetMatchingVersionTestCase(unittest.TestCase):
    "Test for `determine_install_upgrade_version.py`"

    def test_get_matching_versions(self):
        """ when only one matching version exist and its pre-release, it is returned """
        test_pkgs = ["origin-1.4.1-1.el7.x86_64", "origin-1.5.0-0.4.el7.x86_64"]
        test_pkgs_objs = TestPackage.create_test_packages(test_pkgs)
        self.assertEqual(get_matching_versions('origin', test_pkgs_objs, '1.5'), ['1.5.0-0.4.el7'])

    def test_with_single_pre_release(self):
        """ when only one pre-release version exist, it is returned """
        test_pkgs = ["origin-1.5.0-0.4.el7.x86_64"]
        test_pkgs_objs = TestPackage.create_test_packages(test_pkgs)
        self.assertEqual(get_matching_versions('origin', test_pkgs_objs, '1.5'), ['1.5.0-0.4.el7'])

    def test_with_multiple_pre_release(self):
        """ when only one pre-release version exist, it is returned """
        test_pkgs = ["origin-1.5.0-0.4.el7.x86_64", "origin-1.5.2-0.1.el7.x86_64"]
        test_pkgs_objs = TestPackage.create_test_packages(test_pkgs)
        self.assertEqual(get_matching_versions('origin', test_pkgs_objs, '1.5'), ['1.5.0-0.4.el7', '1.5.2-0.1.el7'])

    def test_with_single_release(self):
        """ when both release and pre-release versions exist, only release versions are returned """
        test_pkgs = ["origin-1.5.0-0.4.el7.x86_64", "origin-1.5.0-1.1.el7.x86_64"]
        test_pkgs_objs = TestPackage.create_test_packages(test_pkgs)
        self.assertEqual(get_matching_versions('origin', test_pkgs_objs, '1.5'), ["1.5.0-1.1.el7"])

    def test_with_muptiple_release(self):
        """ when both release and pre-release versions exist, only release version is returned """
        test_pkgs = ["origin-1.5.0-0.4.el7.x86_64", "origin-1.5.0-1.1.el7.x86_64", "origin-1.5.2-1.1.el7.x86_64"]
        test_pkgs_objs = TestPackage.create_test_packages(test_pkgs)
        self.assertEqual(get_matching_versions('origin', test_pkgs_objs, '1.5'), ["1.5.0-1.1.el7", "1.5.2-1.1.el7"])

    def test_with_no_matches(self):
        test_pkgs = ["origin-1.2.0-0.4.el7.x86_64", "origin-1.3.0-1.1.el7.x86_64", "origin-1.4.2-1.1.el7.x86_64"]
        test_pkgs_objs = TestPackage.create_test_packages(test_pkgs)
        self.assertRaises(SystemExit, get_matching_versions, 'origin', test_pkgs_objs, '1.5')


class DetermineSearchVersionTestCase(unittest.TestCase):
    "Test for `determine_install_upgrade_version.py`"

    def test_origin_with_standard_versioning_schema(self):
        """ when the origin version is higher then the first version of the new origin versioning schema - origin-3.6 """
        self.assertEqual(determine_search_versions("origin", "3.7.0"), ("3.6", "3.7"))

    def test_origin_with_short_standard_versioning_schema(self):
        """ when the origin version is in short format and higher then the first version of the new origin versioning schema - origin-3.6 """
        self.assertEqual(determine_search_versions("origin", "3.7"), ("3.6", "3.7"))

    def test_origin_with_standard_to_legacy_versioning_schema(self):
        """ when the origin version is the first from the new origin versioning schema - origin-3.6 """
        self.assertEqual(determine_search_versions("origin", "3.6.0"), ("1.5", "3.6"))

    def test_origin_with_short_standard_to_legacy_versioning_schema(self):
        """ when the origin version is in short format and first from the new origin versioning schema - origin-3.6 """
        self.assertEqual(determine_search_versions("origin", "3.6"), ("1.5", "3.6"))

    def test_origin_with_legacy_schema(self):
        """ when the origin version is in the old versioning schema """
        self.assertEqual(determine_search_versions("origin", "1.5.0"), ("1.4", "1.5"))

    def test_origin_with_short_legacy_schema(self):
        """ when the origin version is in short and old versioning schema """
        self.assertEqual(determine_search_versions("origin", "1.5"), ("1.4", "1.5"))

    def test_openshift_ansible_with_standard_versioning_schema(self):
        """ when openshift-ansible, which doesnt have different versioning schema, is in 3.7 version """
        self.assertEqual(determine_search_versions("openshift-ansible", "3.7.0"), ("3.6", "3.7"))

    def test_openshift_ansible_with_standard_to_legacy_versioning_schema(self):
        """ when openshift-ansible, which doesnt have different versioning schema is in 3.6 version """
        self.assertEqual(determine_search_versions("openshift-ansible", "3.6.0"), ("3.5", "3.6"))

    def test_openshift_ansible_with_short_standard_to_legacy_versioning_schema(self):
        """ when openshift-ansible, which doesnt have different versioning schema, is in short format and in 3.6 version """
        self.assertEqual(determine_search_versions("openshift-ansible", "3.6"), ("3.5", "3.6"))

    def test_openshift_ansible_with_legacy_versioning_schema(self):
        """ when openshift-ansible, which doesnt have different versioning schema is in 3.4 version """
        self.assertEqual(determine_search_versions("openshift-ansible", "3.5.0"), ("3.4", "3.5"))


class SchemaChangeCheckTestCase(unittest.TestCase):
    "Test for `determine_install_upgrade_version.py`"

    def test_origin_package_with_new_schema(self):
        """ when origin package is in 3.6 version """
        self.assertEqual(schema_change_check("origin", "3", "6"), "3.6")

    def test_origin_package_with_old_schema(self):
        """ when origin package is in 1.5 version """
        self.assertEqual(schema_change_check("origin", "3", "5"), "1.5")

    def test_non_origin_package_with_new_schema(self):
        """ when origin package is in 3.6 version """
        self.assertEqual(schema_change_check("openshift-ansible", "3", "6"), "3.6")

    def test_non_origin_package_with_old_schema(self):
        """ when origin package is in 3.5 version """
        self.assertEqual(schema_change_check("openshift-ansible", "3", "5"), "3.5")


class GetLastVersionTestCase(unittest.TestCase):
    "Test for `determine_install_upgrade_version.py`"

    def test_with_multiple_matching_release_versions(self):
        """ when multiple matching version are present in released versions """
        matching_versions = ["1.2.0-1.el7", "1.2.2-1.el7", "1.2.5-1.el7"]
        install_version = "1.2.5-1.el7"
        self.assertEqual(get_last_version(matching_versions), install_version)

    def test_with_single_matching_release_version(self):
        """ when only a single matching version is present in released versions """
        matching_versions = ["1.5.0-1.4.el7"]
        install_version = "1.5.0-1.4.el7"
        self.assertEqual(get_last_version(matching_versions), install_version)

    def test_with_multiple_matching_pre_release_versions(self):
        """ when multiple matching pre-release version are present in pre-released versions """
        matching_versions = ["1.2.0-0.el7", "1.2.2-0.el7", "1.2.5-0.el7"]
        install_version = "1.2.5-0.el7"
        self.assertEqual(get_last_version(matching_versions), install_version)

    def test_with_single_matching_pre_release_version(self):
        """ when only single matching pre-release version is present in pre-released versions """
        matching_versions = ["1.5.0-0.4.el7"]
        install_version = "1.5.0-0.4.el7"
        self.assertEqual(get_last_version(matching_versions), install_version)


class SortPackagesTestCase(unittest.TestCase):
    "Test for `determine_install_upgrade_version.py`"

    def test_sort_packages_with_exceptional_origin_pkg(self):
        """ when sorting origin packages with exceptional origin-3.6.0-0.0.alpha.0.1 package """
        test_pkgs = ["origin-3.6.0-0.0.alpha.0.1.el7", "origin-3.6.0-0.alpha.0.2.el7"]
        properly_sorted_pkgs = ["origin-3.6.0-0.alpha.0.2.el7"]
        test_pkgs_obj = TestPackage.create_test_packages(test_pkgs)
        properly_sorted_pkgs_obj = TestPackage.create_test_packages(properly_sorted_pkgs)
        sorted_test_pkgs_obj = sort_pkgs(test_pkgs_obj)
        self.assertEqual(sorted_test_pkgs_obj, properly_sorted_pkgs_obj)

    def test_sort_packages_with_different_minor_version(self):
        """ when sorting origin packages with different minor version """
        test_pkgs = ["origin-1.5.1-1.el7", "origin-1.4.0-1.el7"]
        properly_sorted_pkgs = ["origin-1.4.0-1.el7", "origin-1.5.1-1.el7"]
        test_pkgs_obj = TestPackage.create_test_packages(test_pkgs)
        properly_sorted_pkgs_obj = TestPackage.create_test_packages(properly_sorted_pkgs)
        sorted_test_pkgs_obj = sort_pkgs(test_pkgs_obj)
        self.assertEqual(sorted_test_pkgs_obj, properly_sorted_pkgs_obj)

    def test_sort_packages_with_same_minor_version(self):
        """ when sorting origin packages within the same minor version """
        test_pkgs = ["origin-1.5.1-1.el7", "origin-1.5.0-1.el7"]
        properly_sorted_pkgs = ["origin-1.5.0-1.el7", "origin-1.5.1-1.el7"]
        test_pkgs_obj = TestPackage.create_test_packages(test_pkgs)
        properly_sorted_pkgs_obj = TestPackage.create_test_packages(properly_sorted_pkgs)
        sorted_test_pkgs_obj = sort_pkgs(test_pkgs_obj)
        self.assertEqual(sorted_test_pkgs_obj, properly_sorted_pkgs_obj)


if __name__ == '__main__':
    unittest.main()
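For context on the TestPackage helper above: splitFilename breaks an RPM file name into (name, version, release, epoch, arch), which is where the "1.5.0-0.4.el7" style version-release strings asserted by the tests come from. A minimal sketch, assuming yum's rpmUtils.miscutils is what backs the rpmutils alias used above (the exact import is not visible here):

from rpmUtils.miscutils import splitFilename

# one of the package strings used in the tests above
name, version, release, epoch, arch = splitFilename("origin-1.5.0-0.4.el7.x86_64")
# expected roughly: ("origin", "1.5.0", "0.4.el7", "", "x86_64");
# the TestPackage helper joins these as version + "-" + release + "." + arch to build its vra field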
# Levara/pygame-hamster
# include the pygame library
import pygame
import random

# initialize pygame
pygame.init()
# initialize the font module
pygame.font.init()

# constants for the window size
WIDTH = 1024
HEIGHT = 600
# window size tuple
size = (WIDTH, HEIGHT)

# colour definitions - google "colorpicker"
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
BLUE = (0, 0, 255)

# render the welcome text
myfont = pygame.font.SysFont('Arial', 30)
welcome_text = myfont.render("Dobrodosli!", False, BLUE)
# get the size of the welcome text
welcome_text_size = welcome_text.get_rect()
welcome_image = pygame.image.load("shark.jpg")
hamster = pygame.image.load("hamster.png")
hamster = pygame.transform.scale(hamster, (100, 100))

# create a new screen for the game
screen = pygame.display.set_mode(size)
# set the window title
pygame.display.set_caption("Nasa kul igra")

clock = pygame.time.Clock()
game_state = "welcome"
done = False
hit = False
hamster_time = 3000
hamster_x, hamster_y = 100, 100
score = 0

while not done:
    # event loop
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            done = True
        elif event.type == pygame.KEYDOWN:
            if event.key == pygame.K_SPACE:
                game_state = "game"
        elif event.type == pygame.MOUSEBUTTONDOWN:
            if game_state == "game":
                pos = event.pos
                # hamster_pos is the rect returned by the blit in the "game" branch below
                if hamster_pos.collidepoint(pos):
                    hit = True

    if game_state == "welcome":
        screen.fill(WHITE)
        screen.blit(welcome_image, (0, 0))
        screen.blit(welcome_text, (100, 100))
    elif game_state == "game":
        if hit:
            hamster_time = 3000
            hamster_x = random.randint(20, WIDTH)
            hamster_y = random.randint(20, HEIGHT)
            hit = False
            score += 1
        if hamster_time < 0:
            game_state = "game_over"
        screen.fill(WHITE)
        hamster_pos = screen.blit(hamster, (hamster_x, hamster_y))
        score_text = myfont.render("Score: %d!" % score, False, BLUE)
        screen.blit(score_text, (10, 10))
        hamster_time = hamster_time - clock.get_time()
    elif game_state == "game_over":
        screen.fill(BLACK)

    pygame.display.flip()
    # if needed, wait before drawing the next frame so the loop runs at 60 fps
    clock.tick(60)
pygame.event.get(): if event.type == pygame.QUIT: done = True elif event.type == pygame.KEYDOWN:", "done: #event petlja for event in pygame.event.get(): if event.type == pygame.QUIT: done =", "== pygame.KEYDOWN: if event.key == pygame.K_SPACE: game_state = \"game\" elif event.type == pygame.MOUSEBUTTONDOWN:", "screen.blit(hamster, \\ (hamster_x, hamster_y)) score_text = \\ myfont.render(\"Score: %d!\"%score, \\ False, BLUE) screen.blit(score_text,", "(hamster_x, hamster_y)) score_text = \\ myfont.render(\"Score: %d!\"%score, \\ False, BLUE) screen.blit(score_text, (10, 10)", "100, 100) ) elif game_state == \"game\": if hit: hamster_time = 3000 hamster_x", "tuple velicine prozora size = (WIDTH, HEIGHT) #definiranje boja - guglaj colorpicker WHITE=", "== \"game\": if hit: hamster_time = 3000 hamster_x = random.randint(20, WIDTH) hamster_y =", "= random.randint(20, HEIGHT) hit = False score += 1 if hamster_time < 0", "- clock.get_time() elif game_state == \"game_over\": screen.fill(BLACK) pygame.display.flip() #ukoliko je potrebno ceka do", "hamster_time = 3000 hamster_x = random.randint(20, WIDTH) hamster_y = random.randint(20, HEIGHT) hit =", "= pygame.transform.scale(hamster, \\ (100, 100) ) #definiranje novog ekrana za igru screen =", "= (WIDTH, HEIGHT) #definiranje boja - guglaj colorpicker WHITE= ( 255, 255, 255)", "False, BLUE) #daj mi velicinu welcome teksta welcome_text_size = welcome_text.get_rect() welcome_image = pygame.image.load(", "100) ) #definiranje novog ekrana za igru screen = pygame.display.set_mode(size) #definiranje naziva prozora", ") screen.blit(welcome_text, ( 100, 100) ) elif game_state == \"game\": if hit: hamster_time", "random.randint(20, WIDTH) hamster_y = random.randint(20, HEIGHT) hit = False score += 1 if", "screen.fill(BLACK) pygame.display.flip() #ukoliko je potrebno ceka do iscrtavanja #iduceg framea kako bi imao", "10) ) hamster_time = hamster_time - clock.get_time() elif game_state == \"game_over\": screen.fill(BLACK) pygame.display.flip()", "#definiranje novog ekrana za igru screen = pygame.display.set_mode(size) #definiranje naziva prozora pygame.display.set_caption(\"Nasa kul", "score_text = \\ myfont.render(\"Score: %d!\"%score, \\ False, BLUE) screen.blit(score_text, (10, 10) ) hamster_time", "\"shark.jpg\") hamster = pygame.image.load(\"hamster.png\") hamster = pygame.transform.scale(hamster, \\ (100, 100) ) #definiranje novog", "hamster_y = 100, 100 score = 0 while not done: #event petlja for", "255) #Renderiranje pozdravnog teksta myfont = pygame.font.SysFont('Arial', 30) welcome_text = myfont.render(\"Dobrodosli!\", \\ False,", "game_state = \"game_over\" screen.fill(WHITE) hamster_pos = screen.blit(hamster, \\ (hamster_x, hamster_y)) score_text = \\", "hamster_y)) score_text = \\ myfont.render(\"Score: %d!\"%score, \\ False, BLUE) screen.blit(score_text, (10, 10) )", "= random.randint(20, WIDTH) hamster_y = random.randint(20, HEIGHT) hit = False score += 1", "screen.blit(score_text, (10, 10) ) hamster_time = hamster_time - clock.get_time() elif game_state == \"game_over\":", "= 3000 hamster_x = random.randint(20, WIDTH) hamster_y = random.randint(20, HEIGHT) hit = False", "\\ False, BLUE) screen.blit(score_text, (10, 10) ) hamster_time = hamster_time - clock.get_time() elif", "konstanti za velicinu prozora WIDTH = 1024 HEIGHT = 600 # tuple velicine", "= (0, 0, 255) #Renderiranje pozdravnog teksta myfont = pygame.font.SysFont('Arial', 30) welcome_text =", "prozora WIDTH = 1024 HEIGHT = 600 # tuple velicine prozora size =", "velicinu prozora WIDTH 
= 1024 HEIGHT = 600 # tuple velicine prozora size", "0 ) BLUE = (0, 0, 255) #Renderiranje pozdravnog teksta myfont = pygame.font.SysFont('Arial',", "#definiranje boja - guglaj colorpicker WHITE= ( 255, 255, 255) BLACK= ( 0,", "= hamster_time - clock.get_time() elif game_state == \"game_over\": screen.fill(BLACK) pygame.display.flip() #ukoliko je potrebno", "welcome_text = myfont.render(\"Dobrodosli!\", \\ False, BLUE) #daj mi velicinu welcome teksta welcome_text_size =", "BLUE) #daj mi velicinu welcome teksta welcome_text_size = welcome_text.get_rect() welcome_image = pygame.image.load( \\", "#inicijalizacija pygame pygame.init() #inicijalizacije fontova pygame.font.init() # definiranje konstanti za velicinu prozora WIDTH", "naziva prozora pygame.display.set_caption(\"Nasa kul igra\") clock = pygame.time.Clock() game_state = \"welcome\" done =", "pygame.KEYDOWN: if event.key == pygame.K_SPACE: game_state = \"game\" elif event.type == pygame.MOUSEBUTTONDOWN: if", ") BLUE = (0, 0, 255) #Renderiranje pozdravnog teksta myfont = pygame.font.SysFont('Arial', 30)", "done = False hit = False hamster_time = 3000 hamster_x, hamster_y = 100,", "255, 255) BLACK= ( 0, 0, 0 ) BLUE = (0, 0, 255)", "elif game_state == \"game_over\": screen.fill(BLACK) pygame.display.flip() #ukoliko je potrebno ceka do iscrtavanja #iduceg", "screen.blit(welcome_image, (0,0) ) screen.blit(welcome_text, ( 100, 100) ) elif game_state == \"game\": if", "\\ \"shark.jpg\") hamster = pygame.image.load(\"hamster.png\") hamster = pygame.transform.scale(hamster, \\ (100, 100) ) #definiranje", "myfont = pygame.font.SysFont('Arial', 30) welcome_text = myfont.render(\"Dobrodosli!\", \\ False, BLUE) #daj mi velicinu" ]
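The fragments in the row above overlap to cover what appears to be a small pygame "whack-a-hamster" game with Croatian comments. A minimal sketch reassembling those fragments follows, with the comments translated to English; the initial hamster_pos rectangle, the trailing clock.tick(60) call and the placement of pygame.display.flip() at the end of the loop are assumptions inferred from the "60fpsa" comment rather than text present in the fragments, and the image files shark.jpg and hamster.png are taken as given.

import pygame
import random

# initialise pygame and its font module
pygame.init()
pygame.font.init()

# window size constants and the size tuple
WIDTH = 1024
HEIGHT = 600
size = (WIDTH, HEIGHT)

# colours
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
BLUE = (0, 0, 255)

# render the welcome text and load the images
myfont = pygame.font.SysFont('Arial', 30)
welcome_text = myfont.render("Dobrodosli!", False, BLUE)
welcome_text_size = welcome_text.get_rect()
welcome_image = pygame.image.load("shark.jpg")
hamster = pygame.image.load("hamster.png")
hamster = pygame.transform.scale(hamster, (100, 100))

# create the game window
screen = pygame.display.set_mode(size)
pygame.display.set_caption("Nasa kul igra")
clock = pygame.time.Clock()

game_state = "welcome"
done = False
hit = False
hamster_time = 3000              # milliseconds before the current hamster expires
hamster_x, hamster_y = 100, 100
score = 0
hamster_pos = pygame.Rect(hamster_x, hamster_y, 100, 100)  # assumed initialisation, not in the fragments

while not done:
    # event loop
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            done = True
        elif event.type == pygame.KEYDOWN:
            if event.key == pygame.K_SPACE:
                game_state = "game"
        elif event.type == pygame.MOUSEBUTTONDOWN:
            if game_state == "game":
                pos = event.pos
                if hamster_pos.collidepoint(pos):
                    hit = True

    if game_state == "welcome":
        screen.fill(WHITE)
        screen.blit(welcome_image, (0, 0))
        screen.blit(welcome_text, (100, 100))
    elif game_state == "game":
        if hit:
            # move the hamster, reset its timer and count the hit
            hamster_time = 3000
            hamster_x = random.randint(20, WIDTH)
            hamster_y = random.randint(20, HEIGHT)
            hit = False
            score += 1
        if hamster_time < 0:
            game_state = "game_over"
        screen.fill(WHITE)
        hamster_pos = screen.blit(hamster, (hamster_x, hamster_y))
        score_text = myfont.render("Score: %d!" % score, False, BLUE)
        screen.blit(score_text, (10, 10))
        hamster_time = hamster_time - clock.get_time()
    elif game_state == "game_over":
        screen.fill(BLACK)

    pygame.display.flip()
    # wait for the next frame so the loop runs at roughly 60 fps (assumed)
    clock.tick(60)

The structure is a single state machine ("welcome" / "game" / "game_over") driven by one event loop, which is why hamster_pos is the Rect returned by screen.blit and is reused for the click hit test.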
[ "\"\"\" triple_barrier = TripleBarrierMethodLabeling( num_expiration_bars=3, profit_taking_factor=0.1, stop_loss_factor=0.1 ) target_labels = triple_barrier.fit_transform(dollar_bar_dataframe[[CLOSE]]) target_labels =", "average uniqueness is the same as the manual calculation. \"\"\" triple_barrier = TripleBarrierMethodLabeling(", "target_labels = triple_barrier.fit_transform(dollar_bar_dataframe[[CLOSE]]) target_labels = target_labels.dropna() avg_uniqueness_transformer = AverageUniqueness() avg_uniqueness = avg_uniqueness_transformer.transform(target_labels) assert", "target_labels.dropna() avg_uniqueness_transformer = AverageUniqueness() avg_uniqueness = avg_uniqueness_transformer.transform(target_labels) assert avg_uniqueness.iloc[22] == 13 / 36", "import pytest from mizarlabs.static import CLOSE from mizarlabs.transformers.sampling.average_uniqueness import AverageUniqueness from mizarlabs.transformers.targets.labeling import", "test_average_uniqueness(dollar_bar_dataframe: pd.DataFrame): \"\"\" Check whether average uniqueness is the same as the manual", "def test_average_uniqueness(dollar_bar_dataframe: pd.DataFrame): \"\"\" Check whether average uniqueness is the same as the", "mizarlabs.transformers.sampling.average_uniqueness import AverageUniqueness from mizarlabs.transformers.targets.labeling import TripleBarrierMethodLabeling @pytest.mark.usefixtures(\"dollar_bar_dataframe\") def test_average_uniqueness(dollar_bar_dataframe: pd.DataFrame): \"\"\" Check", "TripleBarrierMethodLabeling( num_expiration_bars=3, profit_taking_factor=0.1, stop_loss_factor=0.1 ) target_labels = triple_barrier.fit_transform(dollar_bar_dataframe[[CLOSE]]) target_labels = target_labels.dropna() avg_uniqueness_transformer =", "CLOSE from mizarlabs.transformers.sampling.average_uniqueness import AverageUniqueness from mizarlabs.transformers.targets.labeling import TripleBarrierMethodLabeling @pytest.mark.usefixtures(\"dollar_bar_dataframe\") def test_average_uniqueness(dollar_bar_dataframe: pd.DataFrame):", "triple_barrier.fit_transform(dollar_bar_dataframe[[CLOSE]]) target_labels = target_labels.dropna() avg_uniqueness_transformer = AverageUniqueness() avg_uniqueness = avg_uniqueness_transformer.transform(target_labels) assert avg_uniqueness.iloc[22] ==", "= target_labels.dropna() avg_uniqueness_transformer = AverageUniqueness() avg_uniqueness = avg_uniqueness_transformer.transform(target_labels) assert avg_uniqueness.iloc[22] == 13 /", "AverageUniqueness() avg_uniqueness = avg_uniqueness_transformer.transform(target_labels) assert avg_uniqueness.iloc[22] == 13 / 36 if __name__ ==", "TripleBarrierMethodLabeling @pytest.mark.usefixtures(\"dollar_bar_dataframe\") def test_average_uniqueness(dollar_bar_dataframe: pd.DataFrame): \"\"\" Check whether average uniqueness is the same", "same as the manual calculation. \"\"\" triple_barrier = TripleBarrierMethodLabeling( num_expiration_bars=3, profit_taking_factor=0.1, stop_loss_factor=0.1 )", "import pandas as pd import pytest from mizarlabs.static import CLOSE from mizarlabs.transformers.sampling.average_uniqueness import", "as pd import pytest from mizarlabs.static import CLOSE from mizarlabs.transformers.sampling.average_uniqueness import AverageUniqueness from", "import AverageUniqueness from mizarlabs.transformers.targets.labeling import TripleBarrierMethodLabeling @pytest.mark.usefixtures(\"dollar_bar_dataframe\") def test_average_uniqueness(dollar_bar_dataframe: pd.DataFrame): \"\"\" Check whether", "is the same as the manual calculation. 
\"\"\" triple_barrier = TripleBarrierMethodLabeling( num_expiration_bars=3, profit_taking_factor=0.1,", "the same as the manual calculation. \"\"\" triple_barrier = TripleBarrierMethodLabeling( num_expiration_bars=3, profit_taking_factor=0.1, stop_loss_factor=0.1", "avg_uniqueness_transformer = AverageUniqueness() avg_uniqueness = avg_uniqueness_transformer.transform(target_labels) assert avg_uniqueness.iloc[22] == 13 / 36 if", "stop_loss_factor=0.1 ) target_labels = triple_barrier.fit_transform(dollar_bar_dataframe[[CLOSE]]) target_labels = target_labels.dropna() avg_uniqueness_transformer = AverageUniqueness() avg_uniqueness =", "import CLOSE from mizarlabs.transformers.sampling.average_uniqueness import AverageUniqueness from mizarlabs.transformers.targets.labeling import TripleBarrierMethodLabeling @pytest.mark.usefixtures(\"dollar_bar_dataframe\") def test_average_uniqueness(dollar_bar_dataframe:", "= TripleBarrierMethodLabeling( num_expiration_bars=3, profit_taking_factor=0.1, stop_loss_factor=0.1 ) target_labels = triple_barrier.fit_transform(dollar_bar_dataframe[[CLOSE]]) target_labels = target_labels.dropna() avg_uniqueness_transformer", "= AverageUniqueness() avg_uniqueness = avg_uniqueness_transformer.transform(target_labels) assert avg_uniqueness.iloc[22] == 13 / 36 if __name__", "num_expiration_bars=3, profit_taking_factor=0.1, stop_loss_factor=0.1 ) target_labels = triple_barrier.fit_transform(dollar_bar_dataframe[[CLOSE]]) target_labels = target_labels.dropna() avg_uniqueness_transformer = AverageUniqueness()", "= avg_uniqueness_transformer.transform(target_labels) assert avg_uniqueness.iloc[22] == 13 / 36 if __name__ == \"__main__\": pytest.main([__file__])", "\"\"\" Check whether average uniqueness is the same as the manual calculation. \"\"\"", "from mizarlabs.transformers.targets.labeling import TripleBarrierMethodLabeling @pytest.mark.usefixtures(\"dollar_bar_dataframe\") def test_average_uniqueness(dollar_bar_dataframe: pd.DataFrame): \"\"\" Check whether average uniqueness", "import TripleBarrierMethodLabeling @pytest.mark.usefixtures(\"dollar_bar_dataframe\") def test_average_uniqueness(dollar_bar_dataframe: pd.DataFrame): \"\"\" Check whether average uniqueness is the", "@pytest.mark.usefixtures(\"dollar_bar_dataframe\") def test_average_uniqueness(dollar_bar_dataframe: pd.DataFrame): \"\"\" Check whether average uniqueness is the same as", "profit_taking_factor=0.1, stop_loss_factor=0.1 ) target_labels = triple_barrier.fit_transform(dollar_bar_dataframe[[CLOSE]]) target_labels = target_labels.dropna() avg_uniqueness_transformer = AverageUniqueness() avg_uniqueness", "mizarlabs.transformers.targets.labeling import TripleBarrierMethodLabeling @pytest.mark.usefixtures(\"dollar_bar_dataframe\") def test_average_uniqueness(dollar_bar_dataframe: pd.DataFrame): \"\"\" Check whether average uniqueness is", "pytest from mizarlabs.static import CLOSE from mizarlabs.transformers.sampling.average_uniqueness import AverageUniqueness from mizarlabs.transformers.targets.labeling import TripleBarrierMethodLabeling", "uniqueness is the same as the manual calculation. 
\"\"\" triple_barrier = TripleBarrierMethodLabeling( num_expiration_bars=3,", "avg_uniqueness = avg_uniqueness_transformer.transform(target_labels) assert avg_uniqueness.iloc[22] == 13 / 36 if __name__ == \"__main__\":", "from mizarlabs.transformers.sampling.average_uniqueness import AverageUniqueness from mizarlabs.transformers.targets.labeling import TripleBarrierMethodLabeling @pytest.mark.usefixtures(\"dollar_bar_dataframe\") def test_average_uniqueness(dollar_bar_dataframe: pd.DataFrame): \"\"\"", "manual calculation. \"\"\" triple_barrier = TripleBarrierMethodLabeling( num_expiration_bars=3, profit_taking_factor=0.1, stop_loss_factor=0.1 ) target_labels = triple_barrier.fit_transform(dollar_bar_dataframe[[CLOSE]])", "from mizarlabs.static import CLOSE from mizarlabs.transformers.sampling.average_uniqueness import AverageUniqueness from mizarlabs.transformers.targets.labeling import TripleBarrierMethodLabeling @pytest.mark.usefixtures(\"dollar_bar_dataframe\")", "pd.DataFrame): \"\"\" Check whether average uniqueness is the same as the manual calculation.", "triple_barrier = TripleBarrierMethodLabeling( num_expiration_bars=3, profit_taking_factor=0.1, stop_loss_factor=0.1 ) target_labels = triple_barrier.fit_transform(dollar_bar_dataframe[[CLOSE]]) target_labels = target_labels.dropna()", "calculation. \"\"\" triple_barrier = TripleBarrierMethodLabeling( num_expiration_bars=3, profit_taking_factor=0.1, stop_loss_factor=0.1 ) target_labels = triple_barrier.fit_transform(dollar_bar_dataframe[[CLOSE]]) target_labels", "= triple_barrier.fit_transform(dollar_bar_dataframe[[CLOSE]]) target_labels = target_labels.dropna() avg_uniqueness_transformer = AverageUniqueness() avg_uniqueness = avg_uniqueness_transformer.transform(target_labels) assert avg_uniqueness.iloc[22]", "pd import pytest from mizarlabs.static import CLOSE from mizarlabs.transformers.sampling.average_uniqueness import AverageUniqueness from mizarlabs.transformers.targets.labeling", "the manual calculation. \"\"\" triple_barrier = TripleBarrierMethodLabeling( num_expiration_bars=3, profit_taking_factor=0.1, stop_loss_factor=0.1 ) target_labels =", "AverageUniqueness from mizarlabs.transformers.targets.labeling import TripleBarrierMethodLabeling @pytest.mark.usefixtures(\"dollar_bar_dataframe\") def test_average_uniqueness(dollar_bar_dataframe: pd.DataFrame): \"\"\" Check whether average", ") target_labels = triple_barrier.fit_transform(dollar_bar_dataframe[[CLOSE]]) target_labels = target_labels.dropna() avg_uniqueness_transformer = AverageUniqueness() avg_uniqueness = avg_uniqueness_transformer.transform(target_labels)", "Check whether average uniqueness is the same as the manual calculation. \"\"\" triple_barrier", "as the manual calculation. 
\"\"\" triple_barrier = TripleBarrierMethodLabeling( num_expiration_bars=3, profit_taking_factor=0.1, stop_loss_factor=0.1 ) target_labels", "target_labels = target_labels.dropna() avg_uniqueness_transformer = AverageUniqueness() avg_uniqueness = avg_uniqueness_transformer.transform(target_labels) assert avg_uniqueness.iloc[22] == 13", "pandas as pd import pytest from mizarlabs.static import CLOSE from mizarlabs.transformers.sampling.average_uniqueness import AverageUniqueness", "<gh_stars>10-100 import pandas as pd import pytest from mizarlabs.static import CLOSE from mizarlabs.transformers.sampling.average_uniqueness", "mizarlabs.static import CLOSE from mizarlabs.transformers.sampling.average_uniqueness import AverageUniqueness from mizarlabs.transformers.targets.labeling import TripleBarrierMethodLabeling @pytest.mark.usefixtures(\"dollar_bar_dataframe\") def", "whether average uniqueness is the same as the manual calculation. \"\"\" triple_barrier =" ]
[ ".withColumnRenamed(\"constructorId\", \"constructor_id\") \\ .withColumn(\"ingestion_date\", current_timestamp()) \\ .withColumn(\"data_source\", lit(v_data_source)) \\ .withColumn(\"file_date\", lit(v_file_date)) # COMMAND", "merge_condition = \"tgt.qualify_id = src.qualify_id AND tgt.race_id = src.race_id\" merge_delta_data(final_df, 'f1_processed', 'qualifying', processed_folder_path,", "JSON file using the spark dataframe reader API # COMMAND ---------- from pyspark.sql.types", "# COMMAND ---------- # MAGIC %run \"../includes/configuration\" # COMMAND ---------- # MAGIC %run", "StructField(\"q2\", StringType(), True), StructField(\"q3\", StringType(), True), ]) # COMMAND ---------- qualifying_df = spark.read", "new columns # MAGIC 1. Rename qualifyingId, driverId, constructorId and raceId # MAGIC", "StructField(\"constructorId\", IntegerType(), True), StructField(\"number\", IntegerType(), True), StructField(\"position\", IntegerType(), True), StructField(\"q1\", StringType(), True), StructField(\"q2\",", "COMMAND ---------- qualifying_with_ingestion_date_df = add_ingestion_date(qualifying_df) # COMMAND ---------- from pyspark.sql.functions import lit #", "\"race_id\") \\ .withColumnRenamed(\"constructorId\", \"constructor_id\") \\ .withColumn(\"ingestion_date\", current_timestamp()) \\ .withColumn(\"data_source\", lit(v_data_source)) \\ .withColumn(\"file_date\", lit(v_file_date))", "# MAGIC %run \"../includes/common_functions\" # COMMAND ---------- # MAGIC %md # MAGIC #####", "---------- qualifying_with_ingestion_date_df = add_ingestion_date(qualifying_df) # COMMAND ---------- from pyspark.sql.functions import lit # COMMAND", "Step 1 - Read the JSON file using the spark dataframe reader API", "---------- # MAGIC %run \"../includes/common_functions\" # COMMAND ---------- # MAGIC %md # MAGIC", "IntegerType(), True), StructField(\"driverId\", IntegerType(), True), StructField(\"constructorId\", IntegerType(), True), StructField(\"number\", IntegerType(), True), StructField(\"position\", IntegerType(),", "Rename columns and add new columns # MAGIC 1. Rename qualifyingId, driverId, constructorId", "output to processed container in parquet format # COMMAND ---------- #overwrite_partition(final_df, 'f1_processed', 'qualifying',", "\"constructor_id\") \\ .withColumn(\"ingestion_date\", current_timestamp()) \\ .withColumn(\"data_source\", lit(v_data_source)) \\ .withColumn(\"file_date\", lit(v_file_date)) # COMMAND ----------", "\\ .withColumnRenamed(\"constructorId\", \"constructor_id\") \\ .withColumn(\"ingestion_date\", current_timestamp()) \\ .withColumn(\"data_source\", lit(v_data_source)) \\ .withColumn(\"file_date\", lit(v_file_date)) #", "and add new columns # MAGIC 1. Rename qualifyingId, driverId, constructorId and raceId", "\"../includes/configuration\" # COMMAND ---------- # MAGIC %run \"../includes/common_functions\" # COMMAND ---------- # MAGIC", "qualifyingId, driverId, constructorId and raceId # MAGIC 1. Add ingestion_date with current timestamp", "current_timestamp()) \\ .withColumn(\"data_source\", lit(v_data_source)) \\ .withColumn(\"file_date\", lit(v_file_date)) # COMMAND ---------- # MAGIC %md", "# MAGIC 1. Rename qualifyingId, driverId, constructorId and raceId # MAGIC 1. 
Add", "StructField(\"q3\", StringType(), True), ]) # COMMAND ---------- qualifying_df = spark.read \\ .schema(qualifying_schema) \\", "tgt.race_id = src.race_id\" merge_delta_data(final_df, 'f1_processed', 'qualifying', processed_folder_path, merge_condition, 'race_id') # COMMAND ---------- dbutils.notebook.exit(\"Success\")", "spark.read \\ .schema(qualifying_schema) \\ .option(\"multiLine\", True) \\ .json(f\"{raw_folder_path}/{v_file_date}/qualifying\") # COMMAND ---------- # MAGIC", "qualifying_with_ingestion_date_df = add_ingestion_date(qualifying_df) # COMMAND ---------- from pyspark.sql.functions import lit # COMMAND ----------", "# COMMAND ---------- from pyspark.sql.types import StructType, StructField, IntegerType, StringType # COMMAND ----------", "lit(v_file_date)) # COMMAND ---------- # MAGIC %md # MAGIC ##### Step 3 -", "- Read the JSON file using the spark dataframe reader API # COMMAND", "lit(v_data_source)) \\ .withColumn(\"file_date\", lit(v_file_date)) # COMMAND ---------- # MAGIC %md # MAGIC #####", "# COMMAND ---------- # MAGIC %md # MAGIC ##### Step 3 - Write", "= qualifying_with_ingestion_date_df.withColumnRenamed(\"qualifyId\", \"qualify_id\") \\ .withColumnRenamed(\"driverId\", \"driver_id\") \\ .withColumnRenamed(\"raceId\", \"race_id\") \\ .withColumnRenamed(\"constructorId\", \"constructor_id\") \\", "---------- merge_condition = \"tgt.qualify_id = src.qualify_id AND tgt.race_id = src.race_id\" merge_delta_data(final_df, 'f1_processed', 'qualifying',", "# MAGIC 1. Add ingestion_date with current timestamp # COMMAND ---------- qualifying_with_ingestion_date_df =", "\\ .withColumnRenamed(\"raceId\", \"race_id\") \\ .withColumnRenamed(\"constructorId\", \"constructor_id\") \\ .withColumn(\"ingestion_date\", current_timestamp()) \\ .withColumn(\"data_source\", lit(v_data_source)) \\", "files # COMMAND ---------- dbutils.widgets.text(\"p_data_source\", \"\") v_data_source = dbutils.widgets.get(\"p_data_source\") # COMMAND ---------- dbutils.widgets.text(\"p_file_date\",", "# Databricks notebook source # MAGIC %md # MAGIC ### Ingest qualifying json", "Add ingestion_date with current timestamp # COMMAND ---------- qualifying_with_ingestion_date_df = add_ingestion_date(qualifying_df) # COMMAND", "driverId, constructorId and raceId # MAGIC 1. 
Add ingestion_date with current timestamp #", "'qualifying', 'race_id') # COMMAND ---------- merge_condition = \"tgt.qualify_id = src.qualify_id AND tgt.race_id =", "= \"tgt.qualify_id = src.qualify_id AND tgt.race_id = src.race_id\" merge_delta_data(final_df, 'f1_processed', 'qualifying', processed_folder_path, merge_condition,", "reader API # COMMAND ---------- from pyspark.sql.types import StructType, StructField, IntegerType, StringType #", "##### Step 3 - Write to output to processed container in parquet format", "#overwrite_partition(final_df, 'f1_processed', 'qualifying', 'race_id') # COMMAND ---------- merge_condition = \"tgt.qualify_id = src.qualify_id AND", "IntegerType(), True), StructField(\"constructorId\", IntegerType(), True), StructField(\"number\", IntegerType(), True), StructField(\"position\", IntegerType(), True), StructField(\"q1\", StringType(),", "MAGIC %run \"../includes/configuration\" # COMMAND ---------- # MAGIC %run \"../includes/common_functions\" # COMMAND ----------", "COMMAND ---------- dbutils.widgets.text(\"p_data_source\", \"\") v_data_source = dbutils.widgets.get(\"p_data_source\") # COMMAND ---------- dbutils.widgets.text(\"p_file_date\", \"2021-03-21\") v_file_date", "MAGIC ##### Step 2 - Rename columns and add new columns # MAGIC", "# COMMAND ---------- dbutils.widgets.text(\"p_data_source\", \"\") v_data_source = dbutils.widgets.get(\"p_data_source\") # COMMAND ---------- dbutils.widgets.text(\"p_file_date\", \"2021-03-21\")", "---------- dbutils.widgets.text(\"p_file_date\", \"2021-03-21\") v_file_date = dbutils.widgets.get(\"p_file_date\") # COMMAND ---------- # MAGIC %run \"../includes/configuration\"", "format # COMMAND ---------- #overwrite_partition(final_df, 'f1_processed', 'qualifying', 'race_id') # COMMAND ---------- merge_condition =", "dbutils.widgets.text(\"p_file_date\", \"2021-03-21\") v_file_date = dbutils.widgets.get(\"p_file_date\") # COMMAND ---------- # MAGIC %run \"../includes/configuration\" #", "to output to processed container in parquet format # COMMAND ---------- #overwrite_partition(final_df, 'f1_processed',", "# COMMAND ---------- qualifying_with_ingestion_date_df = add_ingestion_date(qualifying_df) # COMMAND ---------- from pyspark.sql.functions import lit", "qualifying_schema = StructType(fields=[StructField(\"qualifyId\", IntegerType(), False), StructField(\"raceId\", IntegerType(), True), StructField(\"driverId\", IntegerType(), True), StructField(\"constructorId\", IntegerType(),", "MAGIC ##### Step 3 - Write to output to processed container in parquet", "---------- # MAGIC %md # MAGIC ##### Step 2 - Rename columns and", "COMMAND ---------- final_df = qualifying_with_ingestion_date_df.withColumnRenamed(\"qualifyId\", \"qualify_id\") \\ .withColumnRenamed(\"driverId\", \"driver_id\") \\ .withColumnRenamed(\"raceId\", \"race_id\") \\", "3 - Write to output to processed container in parquet format # COMMAND", "MAGIC %run \"../includes/common_functions\" # COMMAND ---------- # MAGIC %md # MAGIC ##### Step", "# MAGIC ##### Step 2 - Rename columns and add new columns #", ".withColumn(\"ingestion_date\", current_timestamp()) \\ .withColumn(\"data_source\", lit(v_data_source)) \\ .withColumn(\"file_date\", lit(v_file_date)) # COMMAND ---------- # MAGIC", "MAGIC %md # MAGIC ##### Step 3 - Write to output to processed", "current timestamp # COMMAND ---------- qualifying_with_ingestion_date_df = add_ingestion_date(qualifying_df) # COMMAND ---------- from pyspark.sql.functions", "IntegerType(), True), StructField(\"number\", 
IntegerType(), True), StructField(\"position\", IntegerType(), True), StructField(\"q1\", StringType(), True), StructField(\"q2\", StringType(),", "# COMMAND ---------- # MAGIC %md # MAGIC ##### Step 2 - Rename", "StructField, IntegerType, StringType # COMMAND ---------- qualifying_schema = StructType(fields=[StructField(\"qualifyId\", IntegerType(), False), StructField(\"raceId\", IntegerType(),", "StringType(), True), ]) # COMMAND ---------- qualifying_df = spark.read \\ .schema(qualifying_schema) \\ .option(\"multiLine\",", "= dbutils.widgets.get(\"p_data_source\") # COMMAND ---------- dbutils.widgets.text(\"p_file_date\", \"2021-03-21\") v_file_date = dbutils.widgets.get(\"p_file_date\") # COMMAND ----------", "%run \"../includes/common_functions\" # COMMAND ---------- # MAGIC %md # MAGIC ##### Step 1", "COMMAND ---------- # MAGIC %run \"../includes/configuration\" # COMMAND ---------- # MAGIC %run \"../includes/common_functions\"", "from pyspark.sql.types import StructType, StructField, IntegerType, StringType # COMMAND ---------- qualifying_schema = StructType(fields=[StructField(\"qualifyId\",", "\\ .withColumnRenamed(\"driverId\", \"driver_id\") \\ .withColumnRenamed(\"raceId\", \"race_id\") \\ .withColumnRenamed(\"constructorId\", \"constructor_id\") \\ .withColumn(\"ingestion_date\", current_timestamp()) \\", "notebook source # MAGIC %md # MAGIC ### Ingest qualifying json files #", "= src.race_id\" merge_delta_data(final_df, 'f1_processed', 'qualifying', processed_folder_path, merge_condition, 'race_id') # COMMAND ---------- dbutils.notebook.exit(\"Success\") #", "= dbutils.widgets.get(\"p_file_date\") # COMMAND ---------- # MAGIC %run \"../includes/configuration\" # COMMAND ---------- #", "COMMAND ---------- qualifying_df = spark.read \\ .schema(qualifying_schema) \\ .option(\"multiLine\", True) \\ .json(f\"{raw_folder_path}/{v_file_date}/qualifying\") #", "---------- # MAGIC %md # MAGIC ##### Step 3 - Write to output", "pyspark.sql.functions import lit # COMMAND ---------- final_df = qualifying_with_ingestion_date_df.withColumnRenamed(\"qualifyId\", \"qualify_id\") \\ .withColumnRenamed(\"driverId\", \"driver_id\")", "container in parquet format # COMMAND ---------- #overwrite_partition(final_df, 'f1_processed', 'qualifying', 'race_id') # COMMAND", "COMMAND ---------- merge_condition = \"tgt.qualify_id = src.qualify_id AND tgt.race_id = src.race_id\" merge_delta_data(final_df, 'f1_processed',", "%md # MAGIC ##### Step 1 - Read the JSON file using the", "# MAGIC %md # MAGIC ##### Step 1 - Read the JSON file", "StringType # COMMAND ---------- qualifying_schema = StructType(fields=[StructField(\"qualifyId\", IntegerType(), False), StructField(\"raceId\", IntegerType(), True), StructField(\"driverId\",", "Ingest qualifying json files # COMMAND ---------- dbutils.widgets.text(\"p_data_source\", \"\") v_data_source = dbutils.widgets.get(\"p_data_source\") #", "%md # MAGIC ### Ingest qualifying json files # COMMAND ---------- dbutils.widgets.text(\"p_data_source\", \"\")", "---------- # MAGIC %md # MAGIC ##### Step 1 - Read the JSON", "# MAGIC ##### Step 1 - Read the JSON file using the spark", "processed container in parquet format # COMMAND ---------- #overwrite_partition(final_df, 'f1_processed', 'qualifying', 'race_id') #", "##### Step 1 - Read the JSON file using the spark dataframe reader", "IntegerType(), True), StructField(\"q1\", StringType(), True), StructField(\"q2\", StringType(), True), StructField(\"q3\", StringType(), True), ]) #", "True) \\ 
.json(f\"{raw_folder_path}/{v_file_date}/qualifying\") # COMMAND ---------- # MAGIC %md # MAGIC ##### Step", "qualifying_with_ingestion_date_df.withColumnRenamed(\"qualifyId\", \"qualify_id\") \\ .withColumnRenamed(\"driverId\", \"driver_id\") \\ .withColumnRenamed(\"raceId\", \"race_id\") \\ .withColumnRenamed(\"constructorId\", \"constructor_id\") \\ .withColumn(\"ingestion_date\",", ".withColumnRenamed(\"driverId\", \"driver_id\") \\ .withColumnRenamed(\"raceId\", \"race_id\") \\ .withColumnRenamed(\"constructorId\", \"constructor_id\") \\ .withColumn(\"ingestion_date\", current_timestamp()) \\ .withColumn(\"data_source\",", "---------- # MAGIC %run \"../includes/configuration\" # COMMAND ---------- # MAGIC %run \"../includes/common_functions\" #", "1 - Read the JSON file using the spark dataframe reader API #", "import lit # COMMAND ---------- final_df = qualifying_with_ingestion_date_df.withColumnRenamed(\"qualifyId\", \"qualify_id\") \\ .withColumnRenamed(\"driverId\", \"driver_id\") \\", "COMMAND ---------- dbutils.widgets.text(\"p_file_date\", \"2021-03-21\") v_file_date = dbutils.widgets.get(\"p_file_date\") # COMMAND ---------- # MAGIC %run", "constructorId and raceId # MAGIC 1. Add ingestion_date with current timestamp # COMMAND", "from pyspark.sql.functions import lit # COMMAND ---------- final_df = qualifying_with_ingestion_date_df.withColumnRenamed(\"qualifyId\", \"qualify_id\") \\ .withColumnRenamed(\"driverId\",", "True), StructField(\"number\", IntegerType(), True), StructField(\"position\", IntegerType(), True), StructField(\"q1\", StringType(), True), StructField(\"q2\", StringType(), True),", "ingestion_date with current timestamp # COMMAND ---------- qualifying_with_ingestion_date_df = add_ingestion_date(qualifying_df) # COMMAND ----------", "StructField(\"raceId\", IntegerType(), True), StructField(\"driverId\", IntegerType(), True), StructField(\"constructorId\", IntegerType(), True), StructField(\"number\", IntegerType(), True), StructField(\"position\",", "'race_id') # COMMAND ---------- merge_condition = \"tgt.qualify_id = src.qualify_id AND tgt.race_id = src.race_id\"", ".option(\"multiLine\", True) \\ .json(f\"{raw_folder_path}/{v_file_date}/qualifying\") # COMMAND ---------- # MAGIC %md # MAGIC #####", "Read the JSON file using the spark dataframe reader API # COMMAND ----------", "COMMAND ---------- #overwrite_partition(final_df, 'f1_processed', 'qualifying', 'race_id') # COMMAND ---------- merge_condition = \"tgt.qualify_id =", "# MAGIC %md # MAGIC ##### Step 2 - Rename columns and add", "and raceId # MAGIC 1. Add ingestion_date with current timestamp # COMMAND ----------", "True), ]) # COMMAND ---------- qualifying_df = spark.read \\ .schema(qualifying_schema) \\ .option(\"multiLine\", True)", "---------- from pyspark.sql.functions import lit # COMMAND ---------- final_df = qualifying_with_ingestion_date_df.withColumnRenamed(\"qualifyId\", \"qualify_id\") \\", "\\ .option(\"multiLine\", True) \\ .json(f\"{raw_folder_path}/{v_file_date}/qualifying\") # COMMAND ---------- # MAGIC %md # MAGIC", "\\ .withColumn(\"ingestion_date\", current_timestamp()) \\ .withColumn(\"data_source\", lit(v_data_source)) \\ .withColumn(\"file_date\", lit(v_file_date)) # COMMAND ---------- #", "Step 2 - Rename columns and add new columns # MAGIC 1. 
Rename", "False), StructField(\"raceId\", IntegerType(), True), StructField(\"driverId\", IntegerType(), True), StructField(\"constructorId\", IntegerType(), True), StructField(\"number\", IntegerType(), True),", "COMMAND ---------- # MAGIC %md # MAGIC ##### Step 2 - Rename columns", ".withColumn(\"file_date\", lit(v_file_date)) # COMMAND ---------- # MAGIC %md # MAGIC ##### Step 3", "AND tgt.race_id = src.race_id\" merge_delta_data(final_df, 'f1_processed', 'qualifying', processed_folder_path, merge_condition, 'race_id') # COMMAND ----------", "pyspark.sql.types import StructType, StructField, IntegerType, StringType # COMMAND ---------- qualifying_schema = StructType(fields=[StructField(\"qualifyId\", IntegerType(),", "StringType(), True), StructField(\"q2\", StringType(), True), StructField(\"q3\", StringType(), True), ]) # COMMAND ---------- qualifying_df", "= spark.read \\ .schema(qualifying_schema) \\ .option(\"multiLine\", True) \\ .json(f\"{raw_folder_path}/{v_file_date}/qualifying\") # COMMAND ---------- #", "= src.qualify_id AND tgt.race_id = src.race_id\" merge_delta_data(final_df, 'f1_processed', 'qualifying', processed_folder_path, merge_condition, 'race_id') #", "True), StructField(\"position\", IntegerType(), True), StructField(\"q1\", StringType(), True), StructField(\"q2\", StringType(), True), StructField(\"q3\", StringType(), True),", "columns # MAGIC 1. Rename qualifyingId, driverId, constructorId and raceId # MAGIC 1.", "COMMAND ---------- from pyspark.sql.functions import lit # COMMAND ---------- final_df = qualifying_with_ingestion_date_df.withColumnRenamed(\"qualifyId\", \"qualify_id\")", "%run \"../includes/configuration\" # COMMAND ---------- # MAGIC %run \"../includes/common_functions\" # COMMAND ---------- #", "True), StructField(\"driverId\", IntegerType(), True), StructField(\"constructorId\", IntegerType(), True), StructField(\"number\", IntegerType(), True), StructField(\"position\", IntegerType(), True),", "\"../includes/common_functions\" # COMMAND ---------- # MAGIC %md # MAGIC ##### Step 1 -", "COMMAND ---------- # MAGIC %md # MAGIC ##### Step 1 - Read the", "\"qualify_id\") \\ .withColumnRenamed(\"driverId\", \"driver_id\") \\ .withColumnRenamed(\"raceId\", \"race_id\") \\ .withColumnRenamed(\"constructorId\", \"constructor_id\") \\ .withColumn(\"ingestion_date\", current_timestamp())", "MAGIC %md # MAGIC ### Ingest qualifying json files # COMMAND ---------- dbutils.widgets.text(\"p_data_source\",", "the spark dataframe reader API # COMMAND ---------- from pyspark.sql.types import StructType, StructField,", "Rename qualifyingId, driverId, constructorId and raceId # MAGIC 1. 
Add ingestion_date with current", "'f1_processed', 'qualifying', 'race_id') # COMMAND ---------- merge_condition = \"tgt.qualify_id = src.qualify_id AND tgt.race_id", "\"tgt.qualify_id = src.qualify_id AND tgt.race_id = src.race_id\" merge_delta_data(final_df, 'f1_processed', 'qualifying', processed_folder_path, merge_condition, 'race_id')", "True), StructField(\"q2\", StringType(), True), StructField(\"q3\", StringType(), True), ]) # COMMAND ---------- qualifying_df =", "the JSON file using the spark dataframe reader API # COMMAND ---------- from", "qualifying_df = spark.read \\ .schema(qualifying_schema) \\ .option(\"multiLine\", True) \\ .json(f\"{raw_folder_path}/{v_file_date}/qualifying\") # COMMAND ----------", "---------- #overwrite_partition(final_df, 'f1_processed', 'qualifying', 'race_id') # COMMAND ---------- merge_condition = \"tgt.qualify_id = src.qualify_id", "True), StructField(\"q1\", StringType(), True), StructField(\"q2\", StringType(), True), StructField(\"q3\", StringType(), True), ]) # COMMAND", ".json(f\"{raw_folder_path}/{v_file_date}/qualifying\") # COMMAND ---------- # MAGIC %md # MAGIC ##### Step 2 -", "in parquet format # COMMAND ---------- #overwrite_partition(final_df, 'f1_processed', 'qualifying', 'race_id') # COMMAND ----------", "using the spark dataframe reader API # COMMAND ---------- from pyspark.sql.types import StructType,", "\\ .schema(qualifying_schema) \\ .option(\"multiLine\", True) \\ .json(f\"{raw_folder_path}/{v_file_date}/qualifying\") # COMMAND ---------- # MAGIC %md", "2 - Rename columns and add new columns # MAGIC 1. Rename qualifyingId,", "---------- final_df = qualifying_with_ingestion_date_df.withColumnRenamed(\"qualifyId\", \"qualify_id\") \\ .withColumnRenamed(\"driverId\", \"driver_id\") \\ .withColumnRenamed(\"raceId\", \"race_id\") \\ .withColumnRenamed(\"constructorId\",", "True), StructField(\"q3\", StringType(), True), ]) # COMMAND ---------- qualifying_df = spark.read \\ .schema(qualifying_schema)", "import StructType, StructField, IntegerType, StringType # COMMAND ---------- qualifying_schema = StructType(fields=[StructField(\"qualifyId\", IntegerType(), False),", "StructField(\"q1\", StringType(), True), StructField(\"q2\", StringType(), True), StructField(\"q3\", StringType(), True), ]) # COMMAND ----------", "\\ .json(f\"{raw_folder_path}/{v_file_date}/qualifying\") # COMMAND ---------- # MAGIC %md # MAGIC ##### Step 2", "add new columns # MAGIC 1. Rename qualifyingId, driverId, constructorId and raceId #", "\\ .withColumn(\"file_date\", lit(v_file_date)) # COMMAND ---------- # MAGIC %md # MAGIC ##### Step", "StringType(), True), StructField(\"q3\", StringType(), True), ]) # COMMAND ---------- qualifying_df = spark.read \\", "dbutils.widgets.text(\"p_data_source\", \"\") v_data_source = dbutils.widgets.get(\"p_data_source\") # COMMAND ---------- dbutils.widgets.text(\"p_file_date\", \"2021-03-21\") v_file_date = dbutils.widgets.get(\"p_file_date\")", "MAGIC 1. Rename qualifyingId, driverId, constructorId and raceId # MAGIC 1. 
Add ingestion_date", "IntegerType(), False), StructField(\"raceId\", IntegerType(), True), StructField(\"driverId\", IntegerType(), True), StructField(\"constructorId\", IntegerType(), True), StructField(\"number\", IntegerType(),", "# COMMAND ---------- # MAGIC %run \"../includes/common_functions\" # COMMAND ---------- # MAGIC %md", "\\ .withColumn(\"data_source\", lit(v_data_source)) \\ .withColumn(\"file_date\", lit(v_file_date)) # COMMAND ---------- # MAGIC %md #", "# COMMAND ---------- from pyspark.sql.functions import lit # COMMAND ---------- final_df = qualifying_with_ingestion_date_df.withColumnRenamed(\"qualifyId\",", "dbutils.widgets.get(\"p_file_date\") # COMMAND ---------- # MAGIC %run \"../includes/configuration\" # COMMAND ---------- # MAGIC", "final_df = qualifying_with_ingestion_date_df.withColumnRenamed(\"qualifyId\", \"qualify_id\") \\ .withColumnRenamed(\"driverId\", \"driver_id\") \\ .withColumnRenamed(\"raceId\", \"race_id\") \\ .withColumnRenamed(\"constructorId\", \"constructor_id\")", "# MAGIC %md # MAGIC ### Ingest qualifying json files # COMMAND ----------", "- Write to output to processed container in parquet format # COMMAND ----------", "# COMMAND ---------- final_df = qualifying_with_ingestion_date_df.withColumnRenamed(\"qualifyId\", \"qualify_id\") \\ .withColumnRenamed(\"driverId\", \"driver_id\") \\ .withColumnRenamed(\"raceId\", \"race_id\")", "src.race_id\" merge_delta_data(final_df, 'f1_processed', 'qualifying', processed_folder_path, merge_condition, 'race_id') # COMMAND ---------- dbutils.notebook.exit(\"Success\") # COMMAND", "COMMAND ---------- qualifying_schema = StructType(fields=[StructField(\"qualifyId\", IntegerType(), False), StructField(\"raceId\", IntegerType(), True), StructField(\"driverId\", IntegerType(), True),", "MAGIC %md # MAGIC ##### Step 1 - Read the JSON file using", "# COMMAND ---------- # MAGIC %md # MAGIC ##### Step 1 - Read", "# COMMAND ---------- #overwrite_partition(final_df, 'f1_processed', 'qualifying', 'race_id') # COMMAND ---------- merge_condition = \"tgt.qualify_id", "COMMAND ---------- # MAGIC %run \"../includes/common_functions\" # COMMAND ---------- # MAGIC %md #", "---------- dbutils.widgets.text(\"p_data_source\", \"\") v_data_source = dbutils.widgets.get(\"p_data_source\") # COMMAND ---------- dbutils.widgets.text(\"p_file_date\", \"2021-03-21\") v_file_date =", "v_data_source = dbutils.widgets.get(\"p_data_source\") # COMMAND ---------- dbutils.widgets.text(\"p_file_date\", \"2021-03-21\") v_file_date = dbutils.widgets.get(\"p_file_date\") # COMMAND", ".schema(qualifying_schema) \\ .option(\"multiLine\", True) \\ .json(f\"{raw_folder_path}/{v_file_date}/qualifying\") # COMMAND ---------- # MAGIC %md #", "##### Step 2 - Rename columns and add new columns # MAGIC 1.", "1. Add ingestion_date with current timestamp # COMMAND ---------- qualifying_with_ingestion_date_df = add_ingestion_date(qualifying_df) #", "# MAGIC ##### Step 3 - Write to output to processed container in", "API # COMMAND ---------- from pyspark.sql.types import StructType, StructField, IntegerType, StringType # COMMAND", "MAGIC 1. 
Add ingestion_date with current timestamp # COMMAND ---------- qualifying_with_ingestion_date_df = add_ingestion_date(qualifying_df)", "COMMAND ---------- # MAGIC %md # MAGIC ##### Step 3 - Write to", "IntegerType(), True), StructField(\"position\", IntegerType(), True), StructField(\"q1\", StringType(), True), StructField(\"q2\", StringType(), True), StructField(\"q3\", StringType(),", "MAGIC ##### Step 1 - Read the JSON file using the spark dataframe", "\"\") v_data_source = dbutils.widgets.get(\"p_data_source\") # COMMAND ---------- dbutils.widgets.text(\"p_file_date\", \"2021-03-21\") v_file_date = dbutils.widgets.get(\"p_file_date\") #", "# COMMAND ---------- qualifying_schema = StructType(fields=[StructField(\"qualifyId\", IntegerType(), False), StructField(\"raceId\", IntegerType(), True), StructField(\"driverId\", IntegerType(),", "MAGIC %md # MAGIC ##### Step 2 - Rename columns and add new", "1. Rename qualifyingId, driverId, constructorId and raceId # MAGIC 1. Add ingestion_date with", "StructField(\"position\", IntegerType(), True), StructField(\"q1\", StringType(), True), StructField(\"q2\", StringType(), True), StructField(\"q3\", StringType(), True), ])", ".withColumn(\"data_source\", lit(v_data_source)) \\ .withColumn(\"file_date\", lit(v_file_date)) # COMMAND ---------- # MAGIC %md # MAGIC", "columns and add new columns # MAGIC 1. Rename qualifyingId, driverId, constructorId and", "StructField(\"driverId\", IntegerType(), True), StructField(\"constructorId\", IntegerType(), True), StructField(\"number\", IntegerType(), True), StructField(\"position\", IntegerType(), True), StructField(\"q1\",", "add_ingestion_date(qualifying_df) # COMMAND ---------- from pyspark.sql.functions import lit # COMMAND ---------- final_df =", "Step 3 - Write to output to processed container in parquet format #", "True), StructField(\"constructorId\", IntegerType(), True), StructField(\"number\", IntegerType(), True), StructField(\"position\", IntegerType(), True), StructField(\"q1\", StringType(), True),", "dataframe reader API # COMMAND ---------- from pyspark.sql.types import StructType, StructField, IntegerType, StringType", "raceId # MAGIC 1. 
Add ingestion_date with current timestamp # COMMAND ---------- qualifying_with_ingestion_date_df", "timestamp # COMMAND ---------- qualifying_with_ingestion_date_df = add_ingestion_date(qualifying_df) # COMMAND ---------- from pyspark.sql.functions import", "Databricks notebook source # MAGIC %md # MAGIC ### Ingest qualifying json files", "qualifying json files # COMMAND ---------- dbutils.widgets.text(\"p_data_source\", \"\") v_data_source = dbutils.widgets.get(\"p_data_source\") # COMMAND", "# COMMAND ---------- dbutils.widgets.text(\"p_file_date\", \"2021-03-21\") v_file_date = dbutils.widgets.get(\"p_file_date\") # COMMAND ---------- # MAGIC", "# COMMAND ---------- merge_condition = \"tgt.qualify_id = src.qualify_id AND tgt.race_id = src.race_id\" merge_delta_data(final_df,", "%md # MAGIC ##### Step 3 - Write to output to processed container", ".withColumnRenamed(\"raceId\", \"race_id\") \\ .withColumnRenamed(\"constructorId\", \"constructor_id\") \\ .withColumn(\"ingestion_date\", current_timestamp()) \\ .withColumn(\"data_source\", lit(v_data_source)) \\ .withColumn(\"file_date\",", "merge_delta_data(final_df, 'f1_processed', 'qualifying', processed_folder_path, merge_condition, 'race_id') # COMMAND ---------- dbutils.notebook.exit(\"Success\") # COMMAND ----------", "StructField(\"number\", IntegerType(), True), StructField(\"position\", IntegerType(), True), StructField(\"q1\", StringType(), True), StructField(\"q2\", StringType(), True), StructField(\"q3\",", "\"driver_id\") \\ .withColumnRenamed(\"raceId\", \"race_id\") \\ .withColumnRenamed(\"constructorId\", \"constructor_id\") \\ .withColumn(\"ingestion_date\", current_timestamp()) \\ .withColumn(\"data_source\", lit(v_data_source))", "source # MAGIC %md # MAGIC ### Ingest qualifying json files # COMMAND", "# MAGIC %run \"../includes/configuration\" # COMMAND ---------- # MAGIC %run \"../includes/common_functions\" # COMMAND", "Write to output to processed container in parquet format # COMMAND ---------- #overwrite_partition(final_df,", "= add_ingestion_date(qualifying_df) # COMMAND ---------- from pyspark.sql.functions import lit # COMMAND ---------- final_df", "IntegerType, StringType # COMMAND ---------- qualifying_schema = StructType(fields=[StructField(\"qualifyId\", IntegerType(), False), StructField(\"raceId\", IntegerType(), True),", "COMMAND ---------- from pyspark.sql.types import StructType, StructField, IntegerType, StringType # COMMAND ---------- qualifying_schema", "---------- qualifying_schema = StructType(fields=[StructField(\"qualifyId\", IntegerType(), False), StructField(\"raceId\", IntegerType(), True), StructField(\"driverId\", IntegerType(), True), StructField(\"constructorId\",", "# MAGIC ### Ingest qualifying json files # COMMAND ---------- dbutils.widgets.text(\"p_data_source\", \"\") v_data_source", "spark dataframe reader API # COMMAND ---------- from pyspark.sql.types import StructType, StructField, IntegerType,", "to processed container in parquet format # COMMAND ---------- #overwrite_partition(final_df, 'f1_processed', 'qualifying', 'race_id')", "v_file_date = dbutils.widgets.get(\"p_file_date\") # COMMAND ---------- # MAGIC %run \"../includes/configuration\" # COMMAND ----------", "StructType, StructField, IntegerType, StringType # COMMAND ---------- qualifying_schema = StructType(fields=[StructField(\"qualifyId\", IntegerType(), False), StructField(\"raceId\",", "StructType(fields=[StructField(\"qualifyId\", IntegerType(), False), StructField(\"raceId\", IntegerType(), True), 
StructField(\"driverId\", IntegerType(), True), StructField(\"constructorId\", IntegerType(), True), StructField(\"number\",", "# MAGIC %md # MAGIC ##### Step 3 - Write to output to", "]) # COMMAND ---------- qualifying_df = spark.read \\ .schema(qualifying_schema) \\ .option(\"multiLine\", True) \\", "with current timestamp # COMMAND ---------- qualifying_with_ingestion_date_df = add_ingestion_date(qualifying_df) # COMMAND ---------- from", "---------- qualifying_df = spark.read \\ .schema(qualifying_schema) \\ .option(\"multiLine\", True) \\ .json(f\"{raw_folder_path}/{v_file_date}/qualifying\") # COMMAND", "file using the spark dataframe reader API # COMMAND ---------- from pyspark.sql.types import", "%md # MAGIC ##### Step 2 - Rename columns and add new columns", "# COMMAND ---------- qualifying_df = spark.read \\ .schema(qualifying_schema) \\ .option(\"multiLine\", True) \\ .json(f\"{raw_folder_path}/{v_file_date}/qualifying\")", "MAGIC ### Ingest qualifying json files # COMMAND ---------- dbutils.widgets.text(\"p_data_source\", \"\") v_data_source =", "---------- from pyspark.sql.types import StructType, StructField, IntegerType, StringType # COMMAND ---------- qualifying_schema =", "### Ingest qualifying json files # COMMAND ---------- dbutils.widgets.text(\"p_data_source\", \"\") v_data_source = dbutils.widgets.get(\"p_data_source\")", "lit # COMMAND ---------- final_df = qualifying_with_ingestion_date_df.withColumnRenamed(\"qualifyId\", \"qualify_id\") \\ .withColumnRenamed(\"driverId\", \"driver_id\") \\ .withColumnRenamed(\"raceId\",", "dbutils.widgets.get(\"p_data_source\") # COMMAND ---------- dbutils.widgets.text(\"p_file_date\", \"2021-03-21\") v_file_date = dbutils.widgets.get(\"p_file_date\") # COMMAND ---------- #", "src.qualify_id AND tgt.race_id = src.race_id\" merge_delta_data(final_df, 'f1_processed', 'qualifying', processed_folder_path, merge_condition, 'race_id') # COMMAND", "= StructType(fields=[StructField(\"qualifyId\", IntegerType(), False), StructField(\"raceId\", IntegerType(), True), StructField(\"driverId\", IntegerType(), True), StructField(\"constructorId\", IntegerType(), True),", "- Rename columns and add new columns # MAGIC 1. Rename qualifyingId, driverId,", "json files # COMMAND ---------- dbutils.widgets.text(\"p_data_source\", \"\") v_data_source = dbutils.widgets.get(\"p_data_source\") # COMMAND ----------", "parquet format # COMMAND ---------- #overwrite_partition(final_df, 'f1_processed', 'qualifying', 'race_id') # COMMAND ---------- merge_condition", "\"2021-03-21\") v_file_date = dbutils.widgets.get(\"p_file_date\") # COMMAND ---------- # MAGIC %run \"../includes/configuration\" # COMMAND" ]
[ "assert args.input is not None assert args.output is not None pool.demo.demo(args.input) return 0", "import os import sys import pool.demo def main(): print(os.getcwd()) print(os.listdir(\".\")) print(os.listdir(\"tools\")) parser =", "not None assert args.output is not None pool.demo.demo(args.input) return 0 if __name__ ==", "os import sys import pool.demo def main(): print(os.getcwd()) print(os.listdir(\".\")) print(os.listdir(\"tools\")) parser = argparse.ArgumentParser(", "parser = argparse.ArgumentParser( description='ball locator') parser.add_argument('input', type=argparse.FileType('r')) parser.add_argument('output', nargs='?', type=argparse.FileType('w'), default=sys.stdout) args =", "default=sys.stdout) args = parser.parse_args() assert args.input is not None assert args.output is not", "= argparse.ArgumentParser( description='ball locator') parser.add_argument('input', type=argparse.FileType('r')) parser.add_argument('output', nargs='?', type=argparse.FileType('w'), default=sys.stdout) args = parser.parse_args()", "pool.demo def main(): print(os.getcwd()) print(os.listdir(\".\")) print(os.listdir(\"tools\")) parser = argparse.ArgumentParser( description='ball locator') parser.add_argument('input', type=argparse.FileType('r'))", "type=argparse.FileType('w'), default=sys.stdout) args = parser.parse_args() assert args.input is not None assert args.output is", "import pool.demo def main(): print(os.getcwd()) print(os.listdir(\".\")) print(os.listdir(\"tools\")) parser = argparse.ArgumentParser( description='ball locator') parser.add_argument('input',", "assert args.output is not None pool.demo.demo(args.input) return 0 if __name__ == '__main__': main()", "locator') parser.add_argument('input', type=argparse.FileType('r')) parser.add_argument('output', nargs='?', type=argparse.FileType('w'), default=sys.stdout) args = parser.parse_args() assert args.input is", "description='ball locator') parser.add_argument('input', type=argparse.FileType('r')) parser.add_argument('output', nargs='?', type=argparse.FileType('w'), default=sys.stdout) args = parser.parse_args() assert args.input", "argparse.ArgumentParser( description='ball locator') parser.add_argument('input', type=argparse.FileType('r')) parser.add_argument('output', nargs='?', type=argparse.FileType('w'), default=sys.stdout) args = parser.parse_args() assert", "print_function import argparse import os import sys import pool.demo def main(): print(os.getcwd()) print(os.listdir(\".\"))", "print(os.listdir(\"tools\")) parser = argparse.ArgumentParser( description='ball locator') parser.add_argument('input', type=argparse.FileType('r')) parser.add_argument('output', nargs='?', type=argparse.FileType('w'), default=sys.stdout) args", "parser.parse_args() assert args.input is not None assert args.output is not None pool.demo.demo(args.input) return", "main(): print(os.getcwd()) print(os.listdir(\".\")) print(os.listdir(\"tools\")) parser = argparse.ArgumentParser( description='ball locator') parser.add_argument('input', type=argparse.FileType('r')) parser.add_argument('output', nargs='?',", "= parser.parse_args() assert args.input is not None assert args.output is not None pool.demo.demo(args.input)", "None assert args.output is not None pool.demo.demo(args.input) return 0 if __name__ == '__main__':", "print(os.getcwd()) print(os.listdir(\".\")) print(os.listdir(\"tools\")) parser = argparse.ArgumentParser( description='ball locator') parser.add_argument('input', type=argparse.FileType('r')) parser.add_argument('output', nargs='?', 
type=argparse.FileType('w'),", "import print_function import argparse import os import sys import pool.demo def main(): print(os.getcwd())", "def main(): print(os.getcwd()) print(os.listdir(\".\")) print(os.listdir(\"tools\")) parser = argparse.ArgumentParser( description='ball locator') parser.add_argument('input', type=argparse.FileType('r')) parser.add_argument('output',", "sys import pool.demo def main(): print(os.getcwd()) print(os.listdir(\".\")) print(os.listdir(\"tools\")) parser = argparse.ArgumentParser( description='ball locator')", "nargs='?', type=argparse.FileType('w'), default=sys.stdout) args = parser.parse_args() assert args.input is not None assert args.output", "args.input is not None assert args.output is not None pool.demo.demo(args.input) return 0 if", "<filename>pool/cmd.py<gh_stars>0 from __future__ import print_function import argparse import os import sys import pool.demo", "parser.add_argument('output', nargs='?', type=argparse.FileType('w'), default=sys.stdout) args = parser.parse_args() assert args.input is not None assert", "from __future__ import print_function import argparse import os import sys import pool.demo def", "argparse import os import sys import pool.demo def main(): print(os.getcwd()) print(os.listdir(\".\")) print(os.listdir(\"tools\")) parser", "parser.add_argument('input', type=argparse.FileType('r')) parser.add_argument('output', nargs='?', type=argparse.FileType('w'), default=sys.stdout) args = parser.parse_args() assert args.input is not", "print(os.listdir(\".\")) print(os.listdir(\"tools\")) parser = argparse.ArgumentParser( description='ball locator') parser.add_argument('input', type=argparse.FileType('r')) parser.add_argument('output', nargs='?', type=argparse.FileType('w'), default=sys.stdout)", "__future__ import print_function import argparse import os import sys import pool.demo def main():", "is not None assert args.output is not None pool.demo.demo(args.input) return 0 if __name__", "import argparse import os import sys import pool.demo def main(): print(os.getcwd()) print(os.listdir(\".\")) print(os.listdir(\"tools\"))", "import sys import pool.demo def main(): print(os.getcwd()) print(os.listdir(\".\")) print(os.listdir(\"tools\")) parser = argparse.ArgumentParser( description='ball", "type=argparse.FileType('r')) parser.add_argument('output', nargs='?', type=argparse.FileType('w'), default=sys.stdout) args = parser.parse_args() assert args.input is not None", "args = parser.parse_args() assert args.input is not None assert args.output is not None" ]
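# A hypothetical usage sketch (not part of pool/cmd.py): drive main() programmatically
# by setting sys.argv, since argparse reads sys.argv[1:] by default. The input path is
# an assumption for illustration; output falls back to stdout when omitted. Note that
# argparse.FileType('r') already opens the file and exits on failure, so the asserts
# in main() act only as redundant guards.
import sys
import pool.cmd

sys.argv = ["pool-cmd", "tools/sample_input.txt"]   # assumed input file path
pool.cmd.main()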
[ "events = events[['contract_address', 'transaction_hash', 'arg__base_pool']] events = events.rename( columns={ 'contract_address': 'factory', 'arg__base_pool': 'pool',", "columns={ 'contract_address': 'factory', 'arg__coins': 'coins', 'arg__A': 'A', 'arg__fee': 'fee', 'arg__deployer': 'deployer', } )", "function_parameters=[pool], ) coins = [coin for coin in coins if coin not in", "event_name='MetaPoolDeployed', start_block=factory_start_block, end_block=end_block, provider=provider, verbose=verbose, ) coroutines.append(coroutine) dfs = await asyncio.gather(*coroutines) events =", "from ctc import evm from ctc import rpc from ctc import spec old_pool_factory", "{ '0x0959158b6040d32d04c301a72cbfd6b39e21c9ae': 11942404, '0xb9fc157394af804a3578134a6585c0dc9cc990d4': 12903979, } # # # call based # async", "] else: factory_start_block = start_block coroutine = evm.async_get_events( contract_address=factory, event_name='MetaPoolDeployed', start_block=factory_start_block, end_block=end_block, provider=provider,", "function_name='get_coins', function_parameters=[pool], ) coins = [coin for coin in coins if coin not", "['0x0000000000000000000000000000000000000000', eth_address] ] symbols = await evm.async_get_erc20s_symbols( valid_coins, ) if eth_address in coins:", "12903979 # gather data coroutines = [] for factory in [old_pool_factory, pool_factory]: if", "'arg__coins': 'coins', 'arg__A': 'A', 'arg__fee': 'fee', 'arg__deployer': 'deployer', } ) return events async", "async def _async_get_pool_data( p: int, factory: spec.Address, include_balances: bool = False, ) ->", "coin not in [eth_address]] valid_coins = [ coin for coin in coins if", "data coroutines = [] for factory in [old_pool_factory, pool_factory]: if start_block is None:", "coroutine = evm.async_get_events( contract_address=factory, event_name='BasePoolAdded', start_block=factory_start_block, end_block=end_block, provider=provider, verbose=verbose, ) coroutines.append(coroutine) dfs =", "'symbols': symbols, 'balances': balances, } # # # event based # async def", "12903979, } # # # call based # async def async_get_factory_pool_data( factory: spec.Address,", "balances, } # # # event based # async def async_get_base_pools( start_block: typing.Optional[spec.BlockNumberReference]", "include_balances: bool = False, ) -> CurvePoolData: pool = await rpc.async_eth_call( to_address=factory, function_name='pool_list',", "None, verbose: bool = False, ) -> spec.DataFrame: if start_block is None: start_block", "evm.async_get_erc20s_balance_of( # type: ignore tokens=valid_coins, address=pool, ) ) if eth_address in coins: eth_balance", "= [] for factory in [old_pool_factory, pool_factory]: if start_block is None: factory_start_block =", "= 12903979 # gather data coroutines = [] for factory in [old_pool_factory, pool_factory]:", "'deployer', } ) return events async def async_get_meta_pools( start_block: typing.Optional[spec.BlockNumberReference] = None, end_block:", "in [eth_address]] valid_coins = [ coin for coin in coins if coin not", "'ETH') if include_balances: balances: typing.MutableSequence[int | float | None] = ( await evm.async_get_erc20s_balance_of(", "evm.async_get_events( contract_address=pool_factory, event_name='PlainPoolDeployed', start_block=start_block, end_block=end_block, provider=provider, verbose=verbose, ) events = events[ [ 'transaction_hash',", "await rpc.async_eth_call( to_address=factory, function_name='pool_list', function_parameters=[p], ) coins = await rpc.async_eth_call( to_address=factory, function_name='get_coins', 
function_parameters=[pool],", "import pandas as pd if start_block is None: start_block = 12903979 # gather", "import annotations import typing from typing_extensions import TypedDict from ctc import evm from", "in range(n_pools) ] return await asyncio.gather(*coroutines) class CurvePoolData(TypedDict): address: spec.Address tokens: typing.Sequence[spec.Address] symbols:", "if include_balances: balances: typing.MutableSequence[int | float | None] = ( await evm.async_get_erc20s_balance_of( #", "'arg__coin': 'coin', 'arg__base_pool': 'base_pool', 'arg__A': 'A', 'arg__fee': 'fee', 'arg__deployer': 'deployer', } ) return", "return await asyncio.gather(*coroutines) class CurvePoolData(TypedDict): address: spec.Address tokens: typing.Sequence[spec.Address] symbols: typing.Sequence[str] balances: typing.Sequence[int", "if start_block is None: factory_start_block = creation_blocks[factory] else: factory_start_block = start_block coroutine =", "start_block is None: start_block = 12903979 # gather data coroutines = [] for", "None: factory_start_block: spec.BlockNumberReference = creation_blocks[ factory ] else: factory_start_block = start_block coroutine =", "= events.rename( columns={ 'contract_address': 'factory', 'arg__base_pool': 'pool', } ) return events async def", "contract_address=pool_factory, event_name='PlainPoolDeployed', start_block=start_block, end_block=end_block, provider=provider, verbose=verbose, ) events = events[ [ 'transaction_hash', 'contract_address',", "# async def async_get_factory_pool_data( factory: spec.Address, include_balances: bool = False, ) -> list[CurvePoolData]:", "p: int, factory: spec.Address, include_balances: bool = False, ) -> CurvePoolData: pool =", "event based # async def async_get_base_pools( start_block: typing.Optional[spec.BlockNumberReference] = None, end_block: typing.Optional[spec.BlockNumberReference] =", "function_name='pool_count', ) coroutines = [ _async_get_pool_data(p, factory, include_balances=include_balances) for p in range(n_pools) ]", "= 12903979 events = await evm.async_get_events( contract_address=pool_factory, event_name='PlainPoolDeployed', start_block=start_block, end_block=end_block, provider=provider, verbose=verbose, )", "= None, provider: spec.ProviderSpec = None, verbose: bool = False, ) -> spec.DataFrame:", "for coin in coins if coin not in ['0x0000000000000000000000000000000000000000', eth_address] ] symbols =", "= creation_blocks[ factory ] else: factory_start_block = start_block coroutine = evm.async_get_events( contract_address=factory, event_name='MetaPoolDeployed',", ") coins = [coin for coin in coins if coin not in [eth_address]]", "event_name='BasePoolAdded', start_block=factory_start_block, end_block=end_block, provider=provider, verbose=verbose, ) coroutines.append(coroutine) dfs = await asyncio.gather(*coroutines) events =", "not in ['0x0000000000000000000000000000000000000000', eth_address] ] symbols = await evm.async_get_erc20s_symbols( valid_coins, ) if eth_address", "start_block is None: start_block = 12903979 events = await evm.async_get_events( contract_address=pool_factory, event_name='PlainPoolDeployed', start_block=start_block,", "'contract_address': 'factory', 'arg__coin': 'coin', 'arg__base_pool': 'base_pool', 'arg__A': 'A', 'arg__fee': 'fee', 'arg__deployer': 'deployer', }", "in [old_pool_factory, pool_factory]: if start_block is None: factory_start_block = creation_blocks[factory] else: factory_start_block =", "async_get_factory_pool_data( factory: spec.Address, include_balances: bool = False, ) -> 
list[CurvePoolData]: import asyncio n_pools", "import asyncio n_pools = await rpc.async_eth_call( to_address=factory, function_name='pool_count', ) coroutines = [ _async_get_pool_data(p,", "creation_blocks[factory] else: factory_start_block = start_block coroutine = evm.async_get_events( contract_address=factory, event_name='BasePoolAdded', start_block=factory_start_block, end_block=end_block, provider=provider,", ") coroutines.append(coroutine) dfs = await asyncio.gather(*coroutines) events = pd.concat(dfs) # format data events", "symbols.insert(index, 'ETH') if include_balances: balances: typing.MutableSequence[int | float | None] = ( await", "factory ] else: factory_start_block = start_block coroutine = evm.async_get_events( contract_address=factory, event_name='MetaPoolDeployed', start_block=factory_start_block, end_block=end_block,", "= [ _async_get_pool_data(p, factory, include_balances=include_balances) for p in range(n_pools) ] return await asyncio.gather(*coroutines)", "balances = [None for coin in coins] return { 'address': pool, 'tokens': coins,", "None] async def _async_get_pool_data( p: int, factory: spec.Address, include_balances: bool = False, )", "in coins if coin not in ['0x0000000000000000000000000000000000000000', eth_address] ] symbols = await evm.async_get_erc20s_symbols(", "None: start_block = 12903979 # gather data coroutines = [] for factory in", "'transaction_hash', 'arg__base_pool']] events = events.rename( columns={ 'contract_address': 'factory', 'arg__base_pool': 'pool', } ) return", "if start_block is None: start_block = 12903979 # gather data coroutines = []", "= events.sort_index() events = events[['contract_address', 'transaction_hash', 'arg__base_pool']] events = events.rename( columns={ 'contract_address': 'factory',", "columns={ 'contract_address': 'factory', 'arg__coin': 'coin', 'arg__base_pool': 'base_pool', 'arg__A': 'A', 'arg__fee': 'fee', 'arg__deployer': 'deployer',", "CurvePoolData: pool = await rpc.async_eth_call( to_address=factory, function_name='pool_list', function_parameters=[p], ) coins = await rpc.async_eth_call(", "spec.BlockNumberReference = creation_blocks[ factory ] else: factory_start_block = start_block coroutine = evm.async_get_events( contract_address=factory,", "await evm.async_get_erc20s_symbols( valid_coins, ) if eth_address in coins: index = coins.index(eth_address) symbols.insert(index, 'ETH')", "coin in coins if coin not in ['0x0000000000000000000000000000000000000000', eth_address] ] symbols = await", "] events = events.rename( columns={ 'contract_address': 'factory', 'arg__coin': 'coin', 'arg__base_pool': 'base_pool', 'arg__A': 'A',", "# call based # async def async_get_factory_pool_data( factory: spec.Address, include_balances: bool = False,", "spec.Address, include_balances: bool = False, ) -> CurvePoolData: pool = await rpc.async_eth_call( to_address=factory,", "= [coin for coin in coins if coin not in [eth_address]] valid_coins =", "as pd if start_block is None: start_block = 12903979 # gather data coroutines", "address=pool, ) ) if eth_address in coins: eth_balance = await evm.async_get_eth_balance(pool) balances.insert(index, eth_balance)", "await rpc.async_eth_call( to_address=factory, function_name='get_coins', function_parameters=[pool], ) coins = [coin for coin in coins", "address: spec.Address tokens: typing.Sequence[spec.Address] symbols: typing.Sequence[str] balances: typing.Sequence[int | float | None] async", "| None] async def _async_get_pool_data( p: int, factory: spec.Address, include_balances: bool = 
False,", "if eth_address in coins: index = coins.index(eth_address) symbols.insert(index, 'ETH') if include_balances: balances: typing.MutableSequence[int", "= coins.index(eth_address) symbols.insert(index, 'ETH') if include_balances: balances: typing.MutableSequence[int | float | None] =", "'transaction_hash', 'contract_address', 'arg__coins', 'arg__A', 'arg__fee', 'arg__deployer', ] ] events = events.rename( columns={ 'contract_address':", "verbose=verbose, ) coroutines.append(coroutine) dfs = await asyncio.gather(*coroutines) events = pd.concat(dfs) # format data", "# format data events = events.sort_index() events = events[['contract_address', 'transaction_hash', 'arg__base_pool']] events =", "balances: typing.Sequence[int | float | None] async def _async_get_pool_data( p: int, factory: spec.Address,", "factory_start_block = creation_blocks[factory] else: factory_start_block = start_block coroutine = evm.async_get_events( contract_address=factory, event_name='BasePoolAdded', start_block=factory_start_block,", "False, ) -> spec.DataFrame: if start_block is None: start_block = 12903979 events =", "provider=provider, verbose=verbose, ) events = events[ [ 'transaction_hash', 'contract_address', 'arg__coins', 'arg__A', 'arg__fee', 'arg__deployer',", "'contract_address': 'factory', 'arg__base_pool': 'pool', } ) return events async def async_get_plain_pools( start_block: typing.Optional[spec.BlockNumberReference]", "pool = await rpc.async_eth_call( to_address=factory, function_name='pool_list', function_parameters=[p], ) coins = await rpc.async_eth_call( to_address=factory,", "verbose: bool = False, ) -> spec.DataFrame: import asyncio import pandas as pd", "'arg__A', 'arg__fee', 'arg__deployer', ] ] events = events.rename( columns={ 'contract_address': 'factory', 'arg__coin': 'coin',", ") return events async def async_get_meta_pools( start_block: typing.Optional[spec.BlockNumberReference] = None, end_block: typing.Optional[spec.BlockNumberReference] =", "[old_pool_factory, pool_factory]: if start_block is None: factory_start_block: spec.BlockNumberReference = creation_blocks[ factory ] else:", "factory_start_block: spec.BlockNumberReference = creation_blocks[ factory ] else: factory_start_block = start_block coroutine = evm.async_get_events(", "[ 'transaction_hash', 'contract_address', 'arg__coin', 'arg__base_pool', 'arg__A', 'arg__fee', 'arg__deployer', ] ] events = events.rename(", "import rpc from ctc import spec old_pool_factory = '0x0959158b6040d32d04c301a72cbfd6b39e21c9ae' pool_factory = '0xb9fc157394af804a3578134a6585c0dc9cc990d4' eth_address", "spec.DataFrame: import asyncio import pandas as pd # gather data coroutines = []", "else: factory_start_block = start_block coroutine = evm.async_get_events( contract_address=factory, event_name='MetaPoolDeployed', start_block=factory_start_block, end_block=end_block, provider=provider, verbose=verbose,", "| float | None] async def _async_get_pool_data( p: int, factory: spec.Address, include_balances: bool", "in coins: eth_balance = await evm.async_get_eth_balance(pool) balances.insert(index, eth_balance) else: balances = [None for", "= await rpc.async_eth_call( to_address=factory, function_name='pool_count', ) coroutines = [ _async_get_pool_data(p, factory, include_balances=include_balances) for", "index = coins.index(eth_address) symbols.insert(index, 'ETH') if include_balances: balances: typing.MutableSequence[int | float | None]", "factory: spec.Address, include_balances: bool = False, ) -> CurvePoolData: pool = await rpc.async_eth_call(", 
"events = events[ [ 'transaction_hash', 'contract_address', 'arg__coin', 'arg__base_pool', 'arg__A', 'arg__fee', 'arg__deployer', ] ]", "# # # call based # async def async_get_factory_pool_data( factory: spec.Address, include_balances: bool", "balances.insert(index, eth_balance) else: balances = [None for coin in coins] return { 'address':", "start_block coroutine = evm.async_get_events( contract_address=factory, event_name='BasePoolAdded', start_block=factory_start_block, end_block=end_block, provider=provider, verbose=verbose, ) coroutines.append(coroutine) dfs", "] symbols = await evm.async_get_erc20s_symbols( valid_coins, ) if eth_address in coins: index =", "None, end_block: typing.Optional[spec.BlockNumberReference] = None, provider: spec.ProviderSpec = None, verbose: bool = False,", "symbols: typing.Sequence[str] balances: typing.Sequence[int | float | None] async def _async_get_pool_data( p: int,", "pandas as pd # gather data coroutines = [] for factory in [old_pool_factory,", "rpc.async_eth_call( to_address=factory, function_name='pool_list', function_parameters=[p], ) coins = await rpc.async_eth_call( to_address=factory, function_name='get_coins', function_parameters=[pool], )", "events = events.sort_index() events = events[ [ 'transaction_hash', 'contract_address', 'arg__coin', 'arg__base_pool', 'arg__A', 'arg__fee',", "from __future__ import annotations import typing from typing_extensions import TypedDict from ctc import", "None, verbose: bool = False, ) -> spec.DataFrame: import asyncio import pandas as", "asyncio import pandas as pd # gather data coroutines = [] for factory", "'address': pool, 'tokens': coins, 'symbols': symbols, 'balances': balances, } # # # event", "False, ) -> spec.DataFrame: import asyncio import pandas as pd if start_block is", "start_block is None: factory_start_block = creation_blocks[factory] else: factory_start_block = start_block coroutine = evm.async_get_events(", "in coins: index = coins.index(eth_address) symbols.insert(index, 'ETH') if include_balances: balances: typing.MutableSequence[int | float", ") -> spec.DataFrame: import asyncio import pandas as pd # gather data coroutines", "verbose=verbose, ) events = events[ [ 'transaction_hash', 'contract_address', 'arg__coins', 'arg__A', 'arg__fee', 'arg__deployer', ]", "start_block coroutine = evm.async_get_events( contract_address=factory, event_name='MetaPoolDeployed', start_block=factory_start_block, end_block=end_block, provider=provider, verbose=verbose, ) coroutines.append(coroutine) dfs", "gather data coroutines = [] for factory in [old_pool_factory, pool_factory]: if start_block is", "async def async_get_meta_pools( start_block: typing.Optional[spec.BlockNumberReference] = None, end_block: typing.Optional[spec.BlockNumberReference] = None, provider: spec.ProviderSpec", "from typing_extensions import TypedDict from ctc import evm from ctc import rpc from", "factory in [old_pool_factory, pool_factory]: if start_block is None: factory_start_block: spec.BlockNumberReference = creation_blocks[ factory", "if start_block is None: start_block = 12903979 events = await evm.async_get_events( contract_address=pool_factory, event_name='PlainPoolDeployed',", "pd.concat(dfs) # format data events = events.sort_index() events = events[ [ 'transaction_hash', 'contract_address',", "provider: spec.ProviderSpec = None, verbose: bool = False, ) -> spec.DataFrame: if start_block", "else: factory_start_block = start_block coroutine = evm.async_get_events( contract_address=factory, event_name='BasePoolAdded', 
start_block=factory_start_block, end_block=end_block, provider=provider, verbose=verbose,", "import TypedDict from ctc import evm from ctc import rpc from ctc import", "coin in coins] return { 'address': pool, 'tokens': coins, 'symbols': symbols, 'balances': balances,", "] ] events = events.rename( columns={ 'contract_address': 'factory', 'arg__coins': 'coins', 'arg__A': 'A', 'arg__fee':", "is None: factory_start_block: spec.BlockNumberReference = creation_blocks[ factory ] else: factory_start_block = start_block coroutine", "in coins] return { 'address': pool, 'tokens': coins, 'symbols': symbols, 'balances': balances, }", "# # call based # async def async_get_factory_pool_data( factory: spec.Address, include_balances: bool =", "False, ) -> list[CurvePoolData]: import asyncio n_pools = await rpc.async_eth_call( to_address=factory, function_name='pool_count', )", "is None: start_block = 12903979 events = await evm.async_get_events( contract_address=pool_factory, event_name='PlainPoolDeployed', start_block=start_block, end_block=end_block,", "pd.concat(dfs) # format data events = events.sort_index() events = events[['contract_address', 'transaction_hash', 'arg__base_pool']] events", "events.sort_index() events = events[ [ 'transaction_hash', 'contract_address', 'arg__coin', 'arg__base_pool', 'arg__A', 'arg__fee', 'arg__deployer', ]", "typing_extensions import TypedDict from ctc import evm from ctc import rpc from ctc", "import evm from ctc import rpc from ctc import spec old_pool_factory = '0x0959158b6040d32d04c301a72cbfd6b39e21c9ae'", "events = events.rename( columns={ 'contract_address': 'factory', 'arg__coin': 'coin', 'arg__base_pool': 'base_pool', 'arg__A': 'A', 'arg__fee':", "in coins if coin not in [eth_address]] valid_coins = [ coin for coin", "eth_balance) else: balances = [None for coin in coins] return { 'address': pool,", "include_balances=include_balances) for p in range(n_pools) ] return await asyncio.gather(*coroutines) class CurvePoolData(TypedDict): address: spec.Address", ") coroutines = [ _async_get_pool_data(p, factory, include_balances=include_balances) for p in range(n_pools) ] return", "typing.Sequence[spec.Address] symbols: typing.Sequence[str] balances: typing.Sequence[int | float | None] async def _async_get_pool_data( p:", ") -> spec.DataFrame: if start_block is None: start_block = 12903979 events = await", "'factory', 'arg__base_pool': 'pool', } ) return events async def async_get_plain_pools( start_block: typing.Optional[spec.BlockNumberReference] =", "await rpc.async_eth_call( to_address=factory, function_name='pool_count', ) coroutines = [ _async_get_pool_data(p, factory, include_balances=include_balances) for p", "# type: ignore tokens=valid_coins, address=pool, ) ) if eth_address in coins: eth_balance =", "format data events = events.sort_index() events = events[['contract_address', 'transaction_hash', 'arg__base_pool']] events = events.rename(", "= await rpc.async_eth_call( to_address=factory, function_name='get_coins', function_parameters=[pool], ) coins = [coin for coin in", ") events = events[ [ 'transaction_hash', 'contract_address', 'arg__coins', 'arg__A', 'arg__fee', 'arg__deployer', ] ]", "] return await asyncio.gather(*coroutines) class CurvePoolData(TypedDict): address: spec.Address tokens: typing.Sequence[spec.Address] symbols: typing.Sequence[str] balances:", "start_block = 12903979 events = await evm.async_get_events( contract_address=pool_factory, event_name='PlainPoolDeployed', start_block=start_block, end_block=end_block, provider=provider, 
verbose=verbose,", "'arg__A': 'A', 'arg__fee': 'fee', 'arg__deployer': 'deployer', } ) return events async def async_get_meta_pools(", "'arg__coin', 'arg__base_pool', 'arg__A', 'arg__fee', 'arg__deployer', ] ] events = events.rename( columns={ 'contract_address': 'factory',", "'arg__deployer', ] ] events = events.rename( columns={ 'contract_address': 'factory', 'arg__coin': 'coin', 'arg__base_pool': 'base_pool',", "# gather data coroutines = [] for factory in [old_pool_factory, pool_factory]: if start_block", "'contract_address': 'factory', 'arg__coins': 'coins', 'arg__A': 'A', 'arg__fee': 'fee', 'arg__deployer': 'deployer', } ) return", "bool = False, ) -> spec.DataFrame: import asyncio import pandas as pd #", "evm from ctc import rpc from ctc import spec old_pool_factory = '0x0959158b6040d32d04c301a72cbfd6b39e21c9ae' pool_factory", "coin not in ['0x0000000000000000000000000000000000000000', eth_address] ] symbols = await evm.async_get_erc20s_symbols( valid_coins, ) if", "factory in [old_pool_factory, pool_factory]: if start_block is None: factory_start_block = creation_blocks[factory] else: factory_start_block", "evm.async_get_erc20s_symbols( valid_coins, ) if eth_address in coins: index = coins.index(eth_address) symbols.insert(index, 'ETH') if", "ctc import rpc from ctc import spec old_pool_factory = '0x0959158b6040d32d04c301a72cbfd6b39e21c9ae' pool_factory = '0xb9fc157394af804a3578134a6585c0dc9cc990d4'", "= creation_blocks[factory] else: factory_start_block = start_block coroutine = evm.async_get_events( contract_address=factory, event_name='BasePoolAdded', start_block=factory_start_block, end_block=end_block,", "-> CurvePoolData: pool = await rpc.async_eth_call( to_address=factory, function_name='pool_list', function_parameters=[p], ) coins = await", "eth_address = '0xeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee' creation_blocks = { '0x0959158b6040d32d04c301a72cbfd6b39e21c9ae': 11942404, '0xb9fc157394af804a3578134a6585c0dc9cc990d4': 12903979, } # #", "return { 'address': pool, 'tokens': coins, 'symbols': symbols, 'balances': balances, } # #", "events[['contract_address', 'transaction_hash', 'arg__base_pool']] events = events.rename( columns={ 'contract_address': 'factory', 'arg__base_pool': 'pool', } )", "events[ [ 'transaction_hash', 'contract_address', 'arg__coin', 'arg__base_pool', 'arg__A', 'arg__fee', 'arg__deployer', ] ] events =", "coroutines.append(coroutine) dfs = await asyncio.gather(*coroutines) events = pd.concat(dfs) # format data events =", "[] for factory in [old_pool_factory, pool_factory]: if start_block is None: factory_start_block: spec.BlockNumberReference =", "import pandas as pd # gather data coroutines = [] for factory in", "async_get_base_pools( start_block: typing.Optional[spec.BlockNumberReference] = None, end_block: typing.Optional[spec.BlockNumberReference] = None, provider: spec.ProviderSpec = None,", "'coins', 'arg__A': 'A', 'arg__fee': 'fee', 'arg__deployer': 'deployer', } ) return events async def", "[old_pool_factory, pool_factory]: if start_block is None: factory_start_block = creation_blocks[factory] else: factory_start_block = start_block", "evm.async_get_eth_balance(pool) balances.insert(index, eth_balance) else: balances = [None for coin in coins] return {", "include_balances: bool = False, ) -> list[CurvePoolData]: import asyncio n_pools = await rpc.async_eth_call(", "pool, 'tokens': coins, 'symbols': symbols, 'balances': balances, } # # # event based", "class CurvePoolData(TypedDict): address: spec.Address tokens: typing.Sequence[spec.Address] 
symbols: typing.Sequence[str] balances: typing.Sequence[int | float |", "None, provider: spec.ProviderSpec = None, verbose: bool = False, ) -> spec.DataFrame: import", "[ 'transaction_hash', 'contract_address', 'arg__coins', 'arg__A', 'arg__fee', 'arg__deployer', ] ] events = events.rename( columns={", "= None, verbose: bool = False, ) -> spec.DataFrame: if start_block is None:", "'pool', } ) return events async def async_get_plain_pools( start_block: typing.Optional[spec.BlockNumberReference] = None, end_block:", "bool = False, ) -> CurvePoolData: pool = await rpc.async_eth_call( to_address=factory, function_name='pool_list', function_parameters=[p],", "def async_get_meta_pools( start_block: typing.Optional[spec.BlockNumberReference] = None, end_block: typing.Optional[spec.BlockNumberReference] = None, provider: spec.ProviderSpec =", "spec.ProviderSpec = None, verbose: bool = False, ) -> spec.DataFrame: if start_block is", "coins: index = coins.index(eth_address) symbols.insert(index, 'ETH') if include_balances: balances: typing.MutableSequence[int | float |", "= events.sort_index() events = events[ [ 'transaction_hash', 'contract_address', 'arg__coin', 'arg__base_pool', 'arg__A', 'arg__fee', 'arg__deployer',", "pool_factory = '0xb9fc157394af804a3578134a6585c0dc9cc990d4' eth_address = '0xeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee' creation_blocks = { '0x0959158b6040d32d04c301a72cbfd6b39e21c9ae': 11942404, '0xb9fc157394af804a3578134a6585c0dc9cc990d4': 12903979,", "'arg__base_pool', 'arg__A', 'arg__fee', 'arg__deployer', ] ] events = events.rename( columns={ 'contract_address': 'factory', 'arg__coin':", "old_pool_factory = '0x0959158b6040d32d04c301a72cbfd6b39e21c9ae' pool_factory = '0xb9fc157394af804a3578134a6585c0dc9cc990d4' eth_address = '0xeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee' creation_blocks = { '0x0959158b6040d32d04c301a72cbfd6b39e21c9ae':", "evm.async_get_events( contract_address=factory, event_name='BasePoolAdded', start_block=factory_start_block, end_block=end_block, provider=provider, verbose=verbose, ) coroutines.append(coroutine) dfs = await asyncio.gather(*coroutines)", "= start_block coroutine = evm.async_get_events( contract_address=factory, event_name='MetaPoolDeployed', start_block=factory_start_block, end_block=end_block, provider=provider, verbose=verbose, ) coroutines.append(coroutine)", "bool = False, ) -> spec.DataFrame: if start_block is None: start_block = 12903979", "from ctc import rpc from ctc import spec old_pool_factory = '0x0959158b6040d32d04c301a72cbfd6b39e21c9ae' pool_factory =", "= events[ [ 'transaction_hash', 'contract_address', 'arg__coin', 'arg__base_pool', 'arg__A', 'arg__fee', 'arg__deployer', ] ] events", "for factory in [old_pool_factory, pool_factory]: if start_block is None: factory_start_block = creation_blocks[factory] else:", "'0xeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee' creation_blocks = { '0x0959158b6040d32d04c301a72cbfd6b39e21c9ae': 11942404, '0xb9fc157394af804a3578134a6585c0dc9cc990d4': 12903979, } # # # call", "coroutines = [] for factory in [old_pool_factory, pool_factory]: if start_block is None: factory_start_block", "] ] events = events.rename( columns={ 'contract_address': 'factory', 'arg__coin': 'coin', 'arg__base_pool': 'base_pool', 'arg__A':", "spec.Address tokens: typing.Sequence[spec.Address] symbols: typing.Sequence[str] balances: typing.Sequence[int | float | None] async def", "'arg__deployer': 'deployer', } ) return events async def async_get_meta_pools( start_block: typing.Optional[spec.BlockNumberReference] = None,", "def 
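# A minimal usage sketch for the call-based helpers above (an assumption, not part of
# the original module): fetch per-pool metadata and token balances for every pool
# deployed by the newer factory. Assumes a ctc RPC provider is already configured;
# the helper name _example_factory_pools is hypothetical.
import asyncio

async def _example_factory_pools() -> None:
    pools = await async_get_factory_pool_data(pool_factory, include_balances=True)
    for pool_data in pools:
        print(pool_data['address'], pool_data['symbols'], pool_data['balances'])

# asyncio.run(_example_factory_pools())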
#
# # event based
#

async def async_get_base_pools(
    start_block: typing.Optional[spec.BlockNumberReference] = None,
    end_block: typing.Optional[spec.BlockNumberReference] = None,
    provider: spec.ProviderSpec = None,
    verbose: bool = False,
) -> spec.DataFrame:
    import asyncio
    import pandas as pd

    # gather data
    coroutines = []
    for factory in [old_pool_factory, pool_factory]:
        if start_block is None:
            factory_start_block = creation_blocks[factory]
        else:
            factory_start_block = start_block
        coroutine = evm.async_get_events(
            contract_address=factory,
            event_name='BasePoolAdded',
            start_block=factory_start_block,
            end_block=end_block,
            provider=provider,
            verbose=verbose,
        )
        coroutines.append(coroutine)
    dfs = await asyncio.gather(*coroutines)
    events = pd.concat(dfs)

    # format data
    events = events.sort_index()
    events = events[['contract_address', 'transaction_hash', 'arg__base_pool']]
    events = events.rename(
        columns={
            'contract_address': 'factory',
            'arg__base_pool': 'pool',
        }
    )

    return events


async def async_get_plain_pools(
    start_block: typing.Optional[spec.BlockNumberReference] = None,
    end_block: typing.Optional[spec.BlockNumberReference] = None,
    provider: spec.ProviderSpec = None,
    verbose: bool = False,
) -> spec.DataFrame:
    if start_block is None:
        start_block = 12903979

    events = await evm.async_get_events(
        contract_address=pool_factory,
        event_name='PlainPoolDeployed',
        start_block=start_block,
        end_block=end_block,
        provider=provider,
        verbose=verbose,
    )
    events = events[
        [
            'transaction_hash',
            'contract_address',
            'arg__coins',
            'arg__A',
            'arg__fee',
            'arg__deployer',
        ]
    ]
    events = events.rename(
        columns={
            'contract_address': 'factory',
            'arg__coins': 'coins',
            'arg__A': 'A',
            'arg__fee': 'fee',
            'arg__deployer': 'deployer',
        }
    )

    return events


async def async_get_meta_pools(
    start_block: typing.Optional[spec.BlockNumberReference] = None,
    end_block: typing.Optional[spec.BlockNumberReference] = None,
    provider: spec.ProviderSpec = None,
    verbose: bool = False,
) -> spec.DataFrame:
    import asyncio
    import pandas as pd

    if start_block is None:
        start_block = 12903979

    # gather data
    coroutines = []
    for factory in [old_pool_factory, pool_factory]:
        if start_block is None:
            factory_start_block: spec.BlockNumberReference = creation_blocks[
                factory
            ]
        else:
            factory_start_block = start_block
        coroutine = evm.async_get_events(
            contract_address=factory,
            event_name='MetaPoolDeployed',
            start_block=factory_start_block,
            end_block=end_block,
            provider=provider,
            verbose=verbose,
        )
        coroutines.append(coroutine)
    dfs = await asyncio.gather(*coroutines)
    events = pd.concat(dfs)

    # format data
    events = events.sort_index()
    events = events[
        [
            'transaction_hash',
            'contract_address',
            'arg__coin',
            'arg__base_pool',
            'arg__A',
            'arg__fee',
            'arg__deployer',
        ]
    ]
    events = events.rename(
        columns={
            'contract_address': 'factory',
            'arg__coin': 'coin',
            'arg__base_pool': 'base_pool',
            'arg__A': 'A',
            'arg__fee': 'fee',
            'arg__deployer': 'deployer',
        }
    )

    return events
pool, 'tokens': coins, 'symbols':", "to_address=factory, function_name='get_coins', function_parameters=[pool], ) coins = [coin for coin in coins if coin", "= False, ) -> list[CurvePoolData]: import asyncio n_pools = await rpc.async_eth_call( to_address=factory, function_name='pool_count',", "ctc import spec old_pool_factory = '0x0959158b6040d32d04c301a72cbfd6b39e21c9ae' pool_factory = '0xb9fc157394af804a3578134a6585c0dc9cc990d4' eth_address = '0xeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee' creation_blocks", "if coin not in ['0x0000000000000000000000000000000000000000', eth_address] ] symbols = await evm.async_get_erc20s_symbols( valid_coins, )", "typing.MutableSequence[int | float | None] = ( await evm.async_get_erc20s_balance_of( # type: ignore tokens=valid_coins,", "n_pools = await rpc.async_eth_call( to_address=factory, function_name='pool_count', ) coroutines = [ _async_get_pool_data(p, factory, include_balances=include_balances)", "for coin in coins if coin not in [eth_address]] valid_coins = [ coin", "not in [eth_address]] valid_coins = [ coin for coin in coins if coin", "{ 'address': pool, 'tokens': coins, 'symbols': symbols, 'balances': balances, } # # #", "'arg__base_pool']] events = events.rename( columns={ 'contract_address': 'factory', 'arg__base_pool': 'pool', } ) return events" ]
[ "== \"turn on\": lights[y, x] = True elif match[1] == \"turn off\": lights[y,", "max(lights[y, x] - 1, 0) elif match[1] == \"toggle\": lights[y1:y2 + 1, x1:x2", "(\\d+),(\\d+)\") def day6(fileName): lights = np.zeros((1000, 1000), dtype=bool) with open(fileName) as infile: for", "= int(match[4]) y1 = int(match[3]) y2 = int(match[5]) if match[1] == \"turn on\":", "(\\d+),(\\d+) through (\\d+),(\\d+)\") def day6(fileName): lights = np.zeros((1000, 1000), dtype=bool) with open(fileName) as", "import re lineRegex = re.compile(r\"(turn on|turn off|toggle) (\\d+),(\\d+) through (\\d+),(\\d+)\") def day6(fileName): lights", "print(f\"There are {lights.sum()} lights!\") def day6b(fileName): lights = np.zeros((1000, 1000), dtype=int) with open(fileName)", "{lights.sum()} lights!\") def day6b(fileName): lights = np.zeros((1000, 1000), dtype=int) with open(fileName) as infile:", "np.zeros((1000, 1000), dtype=bool) with open(fileName) as infile: for line in infile: match =", "lineRegex.match(line) if match: x1 = int(match[2]) x2 = int(match[4]) y1 = int(match[3]) y2", "infile: match = lineRegex.match(line) if match: for x in range(int(match[2]), int(match[4]) + 1):", "match[1] == \"turn off\": lights[y, x] = False elif match[1] == \"toggle\": lights[y,", "1): for y in range(y1, y2 + 1): lights[y, x] = max(lights[y, x]", "infile: for line in infile: match = lineRegex.match(line) if match: for x in", "lights[y, x] = True elif match[1] == \"turn off\": lights[y, x] = False", "elif match[1] == \"toggle\": lights[y1:y2 + 1, x1:x2 + 1] += 2 else:", "= False elif match[1] == \"toggle\": lights[y, x] = not lights[y, x] else:", "+ 1] += 1 elif match[1] == \"turn off\": for x in range(x1,", "match: x1 = int(match[2]) x2 = int(match[4]) y1 = int(match[3]) y2 = int(match[5])", "= re.compile(r\"(turn on|turn off|toggle) (\\d+),(\\d+) through (\\d+),(\\d+)\") def day6(fileName): lights = np.zeros((1000, 1000),", "x] = True elif match[1] == \"turn off\": lights[y, x] = False elif", "in range(int(match[2]), int(match[4]) + 1): for y in range(int(match[3]), int(match[5]) + 1): if", "int(match[5]) + 1): if match[1] == \"turn on\": lights[y, x] = True elif", "\"turn off\": for x in range(x1, x2 + 1): for y in range(y1,", "+ 1): if match[1] == \"turn on\": lights[y, x] = True elif match[1]", "through (\\d+),(\\d+)\") def day6(fileName): lights = np.zeros((1000, 1000), dtype=bool) with open(fileName) as infile:", "def day6(fileName): lights = np.zeros((1000, 1000), dtype=bool) with open(fileName) as infile: for line", "= np.zeros((1000, 1000), dtype=int) with open(fileName) as infile: for line in infile: match", "if match[1] == \"turn on\": lights[y, x] = True elif match[1] == \"turn", "y2 + 1): lights[y, x] = max(lights[y, x] - 1, 0) elif match[1]", "in infile: match = lineRegex.match(line) if match: for x in range(int(match[2]), int(match[4]) +", "np import re lineRegex = re.compile(r\"(turn on|turn off|toggle) (\\d+),(\\d+) through (\\d+),(\\d+)\") def day6(fileName):", "elif match[1] == \"turn off\": lights[y, x] = False elif match[1] == \"toggle\":", "dtype=bool) with open(fileName) as infile: for line in infile: match = lineRegex.match(line) if", "range(y1, y2 + 1): lights[y, x] = max(lights[y, x] - 1, 0) elif", "lights[y, x] else: raise ValueError(f\"Unknown directive: {match[1]}\") print(f\"There are {lights.sum()} lights!\") def day6b(fileName):", "match: for x in range(int(match[2]), int(match[4]) + 1): for y in range(int(match[3]), int(match[5])", "infile: match = lineRegex.match(line) if match: x1 = 
int(match[2]) x2 = int(match[4]) y1", "1 elif match[1] == \"turn off\": for x in range(x1, x2 + 1):", "as infile: for line in infile: match = lineRegex.match(line) if match: for x", "line in infile: match = lineRegex.match(line) if match: for x in range(int(match[2]), int(match[4])", "\"turn on\": lights[y1:y2 + 1, x1:x2 + 1] += 1 elif match[1] ==", "match[1] == \"turn on\": lights[y, x] = True elif match[1] == \"turn off\":", "raise ValueError(f\"Unknown directive: {match[1]}\") print(f\"There are {lights.sum()} lights!\") def day6b(fileName): lights = np.zeros((1000,", "directive: {match[1]}\") print(f\"There are {lights.sum()} lights!\") def day6b(fileName): lights = np.zeros((1000, 1000), dtype=int)", "int(match[2]) x2 = int(match[4]) y1 = int(match[3]) y2 = int(match[5]) if match[1] ==", "= True elif match[1] == \"turn off\": lights[y, x] = False elif match[1]", "lights!\") def day6b(fileName): lights = np.zeros((1000, 1000), dtype=int) with open(fileName) as infile: for", "1000), dtype=bool) with open(fileName) as infile: for line in infile: match = lineRegex.match(line)", "range(x1, x2 + 1): for y in range(y1, y2 + 1): lights[y, x]", "+ 1, x1:x2 + 1] += 1 elif match[1] == \"turn off\": for", "import numpy as np import re lineRegex = re.compile(r\"(turn on|turn off|toggle) (\\d+),(\\d+) through", "+= 2 else: raise ValueError(f\"Unknown directive: {match[1]}\") print(f\"Brightness: {lights.sum()}\") #day6(\"6test.txt\") #day6(\"6.txt\") day6b(\"6btest.txt\") day6b(\"6.txt\")", "+ 1): for y in range(int(match[3]), int(match[5]) + 1): if match[1] == \"turn", "day6b(fileName): lights = np.zeros((1000, 1000), dtype=int) with open(fileName) as infile: for line in", "1] += 2 else: raise ValueError(f\"Unknown directive: {match[1]}\") print(f\"Brightness: {lights.sum()}\") #day6(\"6test.txt\") #day6(\"6.txt\") day6b(\"6btest.txt\")", "lights[y1:y2 + 1, x1:x2 + 1] += 2 else: raise ValueError(f\"Unknown directive: {match[1]}\")", "elif match[1] == \"turn off\": for x in range(x1, x2 + 1): for", "2 else: raise ValueError(f\"Unknown directive: {match[1]}\") print(f\"Brightness: {lights.sum()}\") #day6(\"6test.txt\") #day6(\"6.txt\") day6b(\"6btest.txt\") day6b(\"6.txt\") #15343601", "x] = not lights[y, x] else: raise ValueError(f\"Unknown directive: {match[1]}\") print(f\"There are {lights.sum()}", "elif match[1] == \"toggle\": lights[y, x] = not lights[y, x] else: raise ValueError(f\"Unknown", "for x in range(x1, x2 + 1): for y in range(y1, y2 +", "match[1] == \"turn on\": lights[y1:y2 + 1, x1:x2 + 1] += 1 elif", "= np.zeros((1000, 1000), dtype=bool) with open(fileName) as infile: for line in infile: match", "lights[y1:y2 + 1, x1:x2 + 1] += 1 elif match[1] == \"turn off\":", "on\": lights[y, x] = True elif match[1] == \"turn off\": lights[y, x] =", "match[1] == \"turn off\": for x in range(x1, x2 + 1): for y", "x] = max(lights[y, x] - 1, 0) elif match[1] == \"toggle\": lights[y1:y2 +", "if match: for x in range(int(match[2]), int(match[4]) + 1): for y in range(int(match[3]),", "in range(int(match[3]), int(match[5]) + 1): if match[1] == \"turn on\": lights[y, x] =", "if match: x1 = int(match[2]) x2 = int(match[4]) y1 = int(match[3]) y2 =", "match = lineRegex.match(line) if match: for x in range(int(match[2]), int(match[4]) + 1): for", "if match[1] == \"turn on\": lights[y1:y2 + 1, x1:x2 + 1] += 1", "off\": for x in range(x1, x2 + 1): for y in range(y1, y2", "x1 = int(match[2]) x2 = int(match[4]) y1 = int(match[3]) y2 = int(match[5]) if", "{match[1]}\") print(f\"There are {lights.sum()} lights!\") 
def day6b(fileName): lights = np.zeros((1000, 1000), dtype=int) with", "else: raise ValueError(f\"Unknown directive: {match[1]}\") print(f\"There are {lights.sum()} lights!\") def day6b(fileName): lights =", "lights = np.zeros((1000, 1000), dtype=int) with open(fileName) as infile: for line in infile:", "+ 1): for y in range(y1, y2 + 1): lights[y, x] = max(lights[y,", "dtype=int) with open(fileName) as infile: for line in infile: match = lineRegex.match(line) if", "open(fileName) as infile: for line in infile: match = lineRegex.match(line) if match: for", "= int(match[2]) x2 = int(match[4]) y1 = int(match[3]) y2 = int(match[5]) if match[1]", "re lineRegex = re.compile(r\"(turn on|turn off|toggle) (\\d+),(\\d+) through (\\d+),(\\d+)\") def day6(fileName): lights =", "= int(match[5]) if match[1] == \"turn on\": lights[y1:y2 + 1, x1:x2 + 1]", "== \"turn off\": for x in range(x1, x2 + 1): for y in", "numpy as np import re lineRegex = re.compile(r\"(turn on|turn off|toggle) (\\d+),(\\d+) through (\\d+),(\\d+)\")", "for line in infile: match = lineRegex.match(line) if match: x1 = int(match[2]) x2", "+ 1] += 2 else: raise ValueError(f\"Unknown directive: {match[1]}\") print(f\"Brightness: {lights.sum()}\") #day6(\"6test.txt\") #day6(\"6.txt\")", "False elif match[1] == \"toggle\": lights[y, x] = not lights[y, x] else: raise", "as infile: for line in infile: match = lineRegex.match(line) if match: x1 =", "0) elif match[1] == \"toggle\": lights[y1:y2 + 1, x1:x2 + 1] += 2", "1): if match[1] == \"turn on\": lights[y, x] = True elif match[1] ==", "ValueError(f\"Unknown directive: {match[1]}\") print(f\"There are {lights.sum()} lights!\") def day6b(fileName): lights = np.zeros((1000, 1000),", "y2 = int(match[5]) if match[1] == \"turn on\": lights[y1:y2 + 1, x1:x2 +", "y in range(int(match[3]), int(match[5]) + 1): if match[1] == \"turn on\": lights[y, x]", "x] = False elif match[1] == \"toggle\": lights[y, x] = not lights[y, x]", "int(match[3]) y2 = int(match[5]) if match[1] == \"turn on\": lights[y1:y2 + 1, x1:x2", "range(int(match[3]), int(match[5]) + 1): if match[1] == \"turn on\": lights[y, x] = True", "== \"turn off\": lights[y, x] = False elif match[1] == \"toggle\": lights[y, x]", "infile: for line in infile: match = lineRegex.match(line) if match: x1 = int(match[2])", "= not lights[y, x] else: raise ValueError(f\"Unknown directive: {match[1]}\") print(f\"There are {lights.sum()} lights!\")", "for x in range(int(match[2]), int(match[4]) + 1): for y in range(int(match[3]), int(match[5]) +", "open(fileName) as infile: for line in infile: match = lineRegex.match(line) if match: x1", "for line in infile: match = lineRegex.match(line) if match: for x in range(int(match[2]),", "lineRegex.match(line) if match: for x in range(int(match[2]), int(match[4]) + 1): for y in", "- 1, 0) elif match[1] == \"toggle\": lights[y1:y2 + 1, x1:x2 + 1]", "in infile: match = lineRegex.match(line) if match: x1 = int(match[2]) x2 = int(match[4])", "range(int(match[2]), int(match[4]) + 1): for y in range(int(match[3]), int(match[5]) + 1): if match[1]", "lights[y, x] = max(lights[y, x] - 1, 0) elif match[1] == \"toggle\": lights[y1:y2", "with open(fileName) as infile: for line in infile: match = lineRegex.match(line) if match:", "= int(match[3]) y2 = int(match[5]) if match[1] == \"turn on\": lights[y1:y2 + 1,", "line in infile: match = lineRegex.match(line) if match: x1 = int(match[2]) x2 =", "day6(fileName): lights = np.zeros((1000, 1000), dtype=bool) with open(fileName) as infile: for line in", "+= 1 elif match[1] == 
\"turn off\": for x in range(x1, x2 +", "= max(lights[y, x] - 1, 0) elif match[1] == \"toggle\": lights[y1:y2 + 1,", "\"turn off\": lights[y, x] = False elif match[1] == \"toggle\": lights[y, x] =", "not lights[y, x] else: raise ValueError(f\"Unknown directive: {match[1]}\") print(f\"There are {lights.sum()} lights!\") def", "int(match[4]) + 1): for y in range(int(match[3]), int(match[5]) + 1): if match[1] ==", "1000), dtype=int) with open(fileName) as infile: for line in infile: match = lineRegex.match(line)", "as np import re lineRegex = re.compile(r\"(turn on|turn off|toggle) (\\d+),(\\d+) through (\\d+),(\\d+)\") def", "match[1] == \"toggle\": lights[y1:y2 + 1, x1:x2 + 1] += 2 else: raise", "1, x1:x2 + 1] += 2 else: raise ValueError(f\"Unknown directive: {match[1]}\") print(f\"Brightness: {lights.sum()}\")", "lineRegex = re.compile(r\"(turn on|turn off|toggle) (\\d+),(\\d+) through (\\d+),(\\d+)\") def day6(fileName): lights = np.zeros((1000,", "1): lights[y, x] = max(lights[y, x] - 1, 0) elif match[1] == \"toggle\":", "+ 1, x1:x2 + 1] += 2 else: raise ValueError(f\"Unknown directive: {match[1]}\") print(f\"Brightness:", "for y in range(y1, y2 + 1): lights[y, x] = max(lights[y, x] -", "\"toggle\": lights[y, x] = not lights[y, x] else: raise ValueError(f\"Unknown directive: {match[1]}\") print(f\"There", "re.compile(r\"(turn on|turn off|toggle) (\\d+),(\\d+) through (\\d+),(\\d+)\") def day6(fileName): lights = np.zeros((1000, 1000), dtype=bool)", "== \"turn on\": lights[y1:y2 + 1, x1:x2 + 1] += 1 elif match[1]", "= lineRegex.match(line) if match: for x in range(int(match[2]), int(match[4]) + 1): for y", "def day6b(fileName): lights = np.zeros((1000, 1000), dtype=int) with open(fileName) as infile: for line", "1): for y in range(int(match[3]), int(match[5]) + 1): if match[1] == \"turn on\":", "1, 0) elif match[1] == \"toggle\": lights[y1:y2 + 1, x1:x2 + 1] +=", "+ 1): lights[y, x] = max(lights[y, x] - 1, 0) elif match[1] ==", "in range(y1, y2 + 1): lights[y, x] = max(lights[y, x] - 1, 0)", "off|toggle) (\\d+),(\\d+) through (\\d+),(\\d+)\") def day6(fileName): lights = np.zeros((1000, 1000), dtype=bool) with open(fileName)", "True elif match[1] == \"turn off\": lights[y, x] = False elif match[1] ==", "match[1] == \"toggle\": lights[y, x] = not lights[y, x] else: raise ValueError(f\"Unknown directive:", "x2 = int(match[4]) y1 = int(match[3]) y2 = int(match[5]) if match[1] == \"turn", "x] - 1, 0) elif match[1] == \"toggle\": lights[y1:y2 + 1, x1:x2 +", "off\": lights[y, x] = False elif match[1] == \"toggle\": lights[y, x] = not", "lights[y, x] = not lights[y, x] else: raise ValueError(f\"Unknown directive: {match[1]}\") print(f\"There are", "lights[y, x] = False elif match[1] == \"toggle\": lights[y, x] = not lights[y,", "1, x1:x2 + 1] += 1 elif match[1] == \"turn off\": for x", "in range(x1, x2 + 1): for y in range(y1, y2 + 1): lights[y,", "lights = np.zeros((1000, 1000), dtype=bool) with open(fileName) as infile: for line in infile:", "for y in range(int(match[3]), int(match[5]) + 1): if match[1] == \"turn on\": lights[y,", "\"turn on\": lights[y, x] = True elif match[1] == \"turn off\": lights[y, x]", "y in range(y1, y2 + 1): lights[y, x] = max(lights[y, x] - 1,", "x in range(x1, x2 + 1): for y in range(y1, y2 + 1):", "x2 + 1): for y in range(y1, y2 + 1): lights[y, x] =", "\"toggle\": lights[y1:y2 + 1, x1:x2 + 1] += 2 else: raise ValueError(f\"Unknown directive:", "1] += 1 elif match[1] == \"turn off\": for x in range(x1, x2", "x in range(int(match[2]), int(match[4]) + 1): for y in 
range(int(match[3]), int(match[5]) + 1):", "== \"toggle\": lights[y, x] = not lights[y, x] else: raise ValueError(f\"Unknown directive: {match[1]}\")", "are {lights.sum()} lights!\") def day6b(fileName): lights = np.zeros((1000, 1000), dtype=int) with open(fileName) as", "x1:x2 + 1] += 1 elif match[1] == \"turn off\": for x in", "np.zeros((1000, 1000), dtype=int) with open(fileName) as infile: for line in infile: match =", "x] else: raise ValueError(f\"Unknown directive: {match[1]}\") print(f\"There are {lights.sum()} lights!\") def day6b(fileName): lights", "match = lineRegex.match(line) if match: x1 = int(match[2]) x2 = int(match[4]) y1 =", "int(match[5]) if match[1] == \"turn on\": lights[y1:y2 + 1, x1:x2 + 1] +=", "x1:x2 + 1] += 2 else: raise ValueError(f\"Unknown directive: {match[1]}\") print(f\"Brightness: {lights.sum()}\") #day6(\"6test.txt\")", "int(match[4]) y1 = int(match[3]) y2 = int(match[5]) if match[1] == \"turn on\": lights[y1:y2", "on|turn off|toggle) (\\d+),(\\d+) through (\\d+),(\\d+)\") def day6(fileName): lights = np.zeros((1000, 1000), dtype=bool) with", "= lineRegex.match(line) if match: x1 = int(match[2]) x2 = int(match[4]) y1 = int(match[3])", "on\": lights[y1:y2 + 1, x1:x2 + 1] += 1 elif match[1] == \"turn", "y1 = int(match[3]) y2 = int(match[5]) if match[1] == \"turn on\": lights[y1:y2 +", "== \"toggle\": lights[y1:y2 + 1, x1:x2 + 1] += 2 else: raise ValueError(f\"Unknown" ]
[ "else: LOG.info( \"None of the changed paths correspond to the current local\" \"configuration.\"", "the changed paths correspond to pyre configuration.\") if self.arguments.local_configuration: if any( is_parent(self.arguments.local_configuration, file)", "subscription) for (root, name) in zip(roots, names) ] def _handle_response(self, response: Dict[str, Any])", "import Subscription, WatchmanSubscriber LOG: Logger = logging.getLogger(__name__) class ConfigurationMonitor(WatchmanSubscriber): \"\"\" The ConfigurationMonitor watches", "-> str: return \"configuration_monitor\" @property def _subscriptions(self) -> List[Subscription]: roots = self._watchman_client.query(\"watch-list\")[\"roots\"] names", "and ProjectFileMonitor when a configuration changes. Logs are found in .pyre/configuration_monitor/configuration_monitor.log To kill", "from .pyre/configuration_monitor/configuration_monitor.pid ; kill <pid>. \"\"\" def __init__( self, arguments, configuration, analysis_directory: AnalysisDirectory", "the path to the project configuration file and compare it with the #", "stop.Stop(self.arguments, self.configuration, self.analysis_directory).run() # TODO(T54088045): Find all local pyre servers running underneath #", "class ConfigurationMonitor(WatchmanSubscriber): \"\"\" The ConfigurationMonitor watches only for .pyre_configuration(.local) files, and will kill", "self.configuration, self.analysis_directory ).run() else: LOG.info( \"None of the changed paths correspond to the", "watches only for .pyre_configuration(.local) files, and will kill the corresponding server and ProjectFileMonitor", "; kill <pid>. \"\"\" def __init__( self, arguments, configuration, analysis_directory: AnalysisDirectory ) ->", "file) for file in absolute_path ): LOG.info(\"Local configuration changed. Stopping pyre server.\") stop.Stop(", "logging.getLogger(__name__) class ConfigurationMonitor(WatchmanSubscriber): \"\"\" The ConfigurationMonitor watches only for .pyre_configuration(.local) files, and will", "is_parent(self.arguments.local_configuration, file) for file in absolute_path ): LOG.info(\"Local configuration changed. Stopping pyre server.\")", "= configuration self.analysis_directory = analysis_directory @property def _name(self) -> str: return \"configuration_monitor\" @property", "all local pyre servers running underneath # and stop them. else: LOG.info(\"None of", "\"pyre_configuration.local\"], [\"suffix\", \"pyre_configuration\"], ], ], \"fields\": [\"name\"], } return [ Subscription(root, name, subscription)", "LOG.info( \"Update to configuration at %s\", os.path.join(response[\"root\"], \",\".join(response[\"files\"])), ) absolute_path = [ os.path.join(response[\"root\"],", "_name(self) -> str: return \"configuration_monitor\" @property def _subscriptions(self) -> List[Subscription]: roots = self._watchman_client.query(\"watch-list\")[\"roots\"]", "pyre server.\") stop.Stop(self.arguments, self.configuration, self.analysis_directory).run() # TODO(T54088045): Find all local pyre servers running", "self.configuration = configuration self.analysis_directory = analysis_directory @property def _name(self) -> str: return \"configuration_monitor\"", "root in roots] subscription = { \"expression\": [ \"allof\", [\"type\", \"f\"], [\"not\", \"empty\"],", "os.path.join(response[\"root\"], file) for file in response[\"files\"] ] # Find the path to the", "the root directory of this source tree. 
# pyre-unsafe import logging import os", "List from .analysis_directory import AnalysisDirectory from .commands import stop from .configuration import CONFIGURATION_FILE", "configuration changes. Logs are found in .pyre/configuration_monitor/configuration_monitor.log To kill a monitor, get pid", "def _subscriptions(self) -> List[Subscription]: roots = self._watchman_client.query(\"watch-list\")[\"roots\"] names = [\"pyre_monitor_{}\".format(os.path.basename(root)) for root in", "\",\".join(response[\"files\"])), ) absolute_path = [ os.path.join(response[\"root\"], file) for file in response[\"files\"] ] #", "project_configuration = os.path.join(response[\"root\"], CONFIGURATION_FILE) if any((project_configuration == file) for file in absolute_path): LOG.info(\"Pyre", "= [ os.path.join(response[\"root\"], file) for file in response[\"files\"] ] # Find the path", "Copyright (c) 2016-present, Facebook, Inc. # # This source code is licensed under", "file) for file in response[\"files\"] ] # Find the path to the project", "file in response[\"files\"] ] # Find the path to the project configuration file", "\"pyre_configuration\"], ], ], \"fields\": [\"name\"], } return [ Subscription(root, name, subscription) for (root,", "AnalysisDirectory from .commands import stop from .configuration import CONFIGURATION_FILE from .filesystem import is_parent", "str: return \"configuration_monitor\" @property def _subscriptions(self) -> List[Subscription]: roots = self._watchman_client.query(\"watch-list\")[\"roots\"] names =", "AnalysisDirectory ) -> None: super(ConfigurationMonitor, self).__init__(configuration, analysis_directory) self.arguments = arguments self.configuration = configuration", "CONFIGURATION_FILE) if any((project_configuration == file) for file in absolute_path): LOG.info(\"Pyre configuration changed. Stopping", "# and stop them. else: LOG.info(\"None of the changed paths correspond to pyre", "<pid>. \"\"\" def __init__( self, arguments, configuration, analysis_directory: AnalysisDirectory ) -> None: super(ConfigurationMonitor,", ".pyre_configuration(.local) files, and will kill the corresponding server and ProjectFileMonitor when a configuration", "\"anyof\", [\"suffix\", \"pyre_configuration.local\"], [\"suffix\", \"pyre_configuration\"], ], ], \"fields\": [\"name\"], } return [ Subscription(root,", "%s\", os.path.join(response[\"root\"], \",\".join(response[\"files\"])), ) absolute_path = [ os.path.join(response[\"root\"], file) for file in response[\"files\"]", "configuration, analysis_directory: AnalysisDirectory ) -> None: super(ConfigurationMonitor, self).__init__(configuration, analysis_directory) self.arguments = arguments self.configuration", "This source code is licensed under the MIT license found in the #", "if any((project_configuration == file) for file in absolute_path): LOG.info(\"Pyre configuration changed. Stopping pyre", "== file) for file in absolute_path): LOG.info(\"Pyre configuration changed. Stopping pyre server.\") stop.Stop(self.arguments,", "local pyre servers running underneath # and stop them. else: LOG.info(\"None of the", "# # This source code is licensed under the MIT license found in", "import stop from .configuration import CONFIGURATION_FILE from .filesystem import is_parent from .watchman_subscriber import", "license found in the # LICENSE file in the root directory of this", "root directory of this source tree. 
# pyre-unsafe import logging import os from", "List[Subscription]: roots = self._watchman_client.query(\"watch-list\")[\"roots\"] names = [\"pyre_monitor_{}\".format(os.path.basename(root)) for root in roots] subscription =", "self.analysis_directory).run() # TODO(T54088045): Find all local pyre servers running underneath # and stop", "tree. # pyre-unsafe import logging import os from logging import Logger from typing", "analysis_directory: AnalysisDirectory ) -> None: super(ConfigurationMonitor, self).__init__(configuration, analysis_directory) self.arguments = arguments self.configuration =", "Any, Dict, List from .analysis_directory import AnalysisDirectory from .commands import stop from .configuration", "server and ProjectFileMonitor when a configuration changes. Logs are found in .pyre/configuration_monitor/configuration_monitor.log To", "[\"pyre_monitor_{}\".format(os.path.basename(root)) for root in roots] subscription = { \"expression\": [ \"allof\", [\"type\", \"f\"],", "any((project_configuration == file) for file in absolute_path): LOG.info(\"Pyre configuration changed. Stopping pyre server.\")", "server.\") stop.Stop(self.arguments, self.configuration, self.analysis_directory).run() # TODO(T54088045): Find all local pyre servers running underneath", "[ os.path.join(response[\"root\"], file) for file in response[\"files\"] ] # Find the path to", "from typing import Any, Dict, List from .analysis_directory import AnalysisDirectory from .commands import", "in absolute_path ): LOG.info(\"Local configuration changed. Stopping pyre server.\") stop.Stop( self.arguments, self.configuration, self.analysis_directory", "kill <pid>. \"\"\" def __init__( self, arguments, configuration, analysis_directory: AnalysisDirectory ) -> None:", "in roots] subscription = { \"expression\": [ \"allof\", [\"type\", \"f\"], [\"not\", \"empty\"], [", "# list of changed configuration files. project_configuration = os.path.join(response[\"root\"], CONFIGURATION_FILE) if any((project_configuration ==", "found in the # LICENSE file in the root directory of this source", "LOG: Logger = logging.getLogger(__name__) class ConfigurationMonitor(WatchmanSubscriber): \"\"\" The ConfigurationMonitor watches only for .pyre_configuration(.local)", ") absolute_path = [ os.path.join(response[\"root\"], file) for file in response[\"files\"] ] # Find", "source tree. # pyre-unsafe import logging import os from logging import Logger from", "configuration changed. Stopping pyre server.\") stop.Stop( self.arguments, self.configuration, self.analysis_directory ).run() else: LOG.info( \"None", "of the changed paths correspond to pyre configuration.\") if self.arguments.local_configuration: if any( is_parent(self.arguments.local_configuration,", "the project configuration file and compare it with the # list of changed", "Facebook, Inc. # # This source code is licensed under the MIT license", "directory of this source tree. # pyre-unsafe import logging import os from logging", ".analysis_directory import AnalysisDirectory from .commands import stop from .configuration import CONFIGURATION_FILE from .filesystem", "return \"configuration_monitor\" @property def _subscriptions(self) -> List[Subscription]: roots = self._watchman_client.query(\"watch-list\")[\"roots\"] names = [\"pyre_monitor_{}\".format(os.path.basename(root))", "and will kill the corresponding server and ProjectFileMonitor when a configuration changes. 
Logs", "Find the path to the project configuration file and compare it with the", "self.arguments.local_configuration: if any( is_parent(self.arguments.local_configuration, file) for file in absolute_path ): LOG.info(\"Local configuration changed.", "[\"suffix\", \"pyre_configuration\"], ], ], \"fields\": [\"name\"], } return [ Subscription(root, name, subscription) for", "from .analysis_directory import AnalysisDirectory from .commands import stop from .configuration import CONFIGURATION_FILE from", "licensed under the MIT license found in the # LICENSE file in the", "logging import Logger from typing import Any, Dict, List from .analysis_directory import AnalysisDirectory", "for root in roots] subscription = { \"expression\": [ \"allof\", [\"type\", \"f\"], [\"not\",", "found in .pyre/configuration_monitor/configuration_monitor.log To kill a monitor, get pid from .pyre/configuration_monitor/configuration_monitor.pid ; kill", "for file in absolute_path ): LOG.info(\"Local configuration changed. Stopping pyre server.\") stop.Stop( self.arguments,", "Dict, List from .analysis_directory import AnalysisDirectory from .commands import stop from .configuration import", "file in absolute_path ): LOG.info(\"Local configuration changed. Stopping pyre server.\") stop.Stop( self.arguments, self.configuration,", ".configuration import CONFIGURATION_FILE from .filesystem import is_parent from .watchman_subscriber import Subscription, WatchmanSubscriber LOG:", "to pyre configuration.\") if self.arguments.local_configuration: if any( is_parent(self.arguments.local_configuration, file) for file in absolute_path", "Subscription, WatchmanSubscriber LOG: Logger = logging.getLogger(__name__) class ConfigurationMonitor(WatchmanSubscriber): \"\"\" The ConfigurationMonitor watches only", "name, subscription) for (root, name) in zip(roots, names) ] def _handle_response(self, response: Dict[str,", "changed paths correspond to pyre configuration.\") if self.arguments.local_configuration: if any( is_parent(self.arguments.local_configuration, file) for", "to the project configuration file and compare it with the # list of", "paths correspond to pyre configuration.\") if self.arguments.local_configuration: if any( is_parent(self.arguments.local_configuration, file) for file", "kill the corresponding server and ProjectFileMonitor when a configuration changes. Logs are found", "if self.arguments.local_configuration: if any( is_parent(self.arguments.local_configuration, file) for file in absolute_path ): LOG.info(\"Local configuration", "in .pyre/configuration_monitor/configuration_monitor.log To kill a monitor, get pid from .pyre/configuration_monitor/configuration_monitor.pid ; kill <pid>.", "super(ConfigurationMonitor, self).__init__(configuration, analysis_directory) self.arguments = arguments self.configuration = configuration self.analysis_directory = analysis_directory @property", "Stopping pyre server.\") stop.Stop(self.arguments, self.configuration, self.analysis_directory).run() # TODO(T54088045): Find all local pyre servers", "ProjectFileMonitor when a configuration changes. Logs are found in .pyre/configuration_monitor/configuration_monitor.log To kill a", "os.path.join(response[\"root\"], CONFIGURATION_FILE) if any((project_configuration == file) for file in absolute_path): LOG.info(\"Pyre configuration changed.", "corresponding server and ProjectFileMonitor when a configuration changes. 
Logs are found in .pyre/configuration_monitor/configuration_monitor.log", "The ConfigurationMonitor watches only for .pyre_configuration(.local) files, and will kill the corresponding server", "the # list of changed configuration files. project_configuration = os.path.join(response[\"root\"], CONFIGURATION_FILE) if any((project_configuration", "[ \"allof\", [\"type\", \"f\"], [\"not\", \"empty\"], [ \"anyof\", [\"suffix\", \"pyre_configuration.local\"], [\"suffix\", \"pyre_configuration\"], ],", "for .pyre_configuration(.local) files, and will kill the corresponding server and ProjectFileMonitor when a", "configuration file and compare it with the # list of changed configuration files.", "servers running underneath # and stop them. else: LOG.info(\"None of the changed paths", "MIT license found in the # LICENSE file in the root directory of", "pid from .pyre/configuration_monitor/configuration_monitor.pid ; kill <pid>. \"\"\" def __init__( self, arguments, configuration, analysis_directory:", "response[\"files\"] ] # Find the path to the project configuration file and compare", "# Copyright (c) 2016-present, Facebook, Inc. # # This source code is licensed", "zip(roots, names) ] def _handle_response(self, response: Dict[str, Any]) -> None: LOG.info( \"Update to", "the # LICENSE file in the root directory of this source tree. #", "self).__init__(configuration, analysis_directory) self.arguments = arguments self.configuration = configuration self.analysis_directory = analysis_directory @property def", "-> List[Subscription]: roots = self._watchman_client.query(\"watch-list\")[\"roots\"] names = [\"pyre_monitor_{}\".format(os.path.basename(root)) for root in roots] subscription", "files, and will kill the corresponding server and ProjectFileMonitor when a configuration changes.", "import Any, Dict, List from .analysis_directory import AnalysisDirectory from .commands import stop from", ".commands import stop from .configuration import CONFIGURATION_FILE from .filesystem import is_parent from .watchman_subscriber", "file) for file in absolute_path): LOG.info(\"Pyre configuration changed. Stopping pyre server.\") stop.Stop(self.arguments, self.configuration,", "# TODO(T54088045): Find all local pyre servers running underneath # and stop them.", "stop from .configuration import CONFIGURATION_FILE from .filesystem import is_parent from .watchman_subscriber import Subscription,", "in zip(roots, names) ] def _handle_response(self, response: Dict[str, Any]) -> None: LOG.info( \"Update", "names = [\"pyre_monitor_{}\".format(os.path.basename(root)) for root in roots] subscription = { \"expression\": [ \"allof\",", "path to the project configuration file and compare it with the # list", "configuration files. project_configuration = os.path.join(response[\"root\"], CONFIGURATION_FILE) if any((project_configuration == file) for file in", "stop them. 
else: LOG.info(\"None of the changed paths correspond to pyre configuration.\") if", "are found in .pyre/configuration_monitor/configuration_monitor.log To kill a monitor, get pid from .pyre/configuration_monitor/configuration_monitor.pid ;", "__init__( self, arguments, configuration, analysis_directory: AnalysisDirectory ) -> None: super(ConfigurationMonitor, self).__init__(configuration, analysis_directory) self.arguments", "pyre server.\") stop.Stop( self.arguments, self.configuration, self.analysis_directory ).run() else: LOG.info( \"None of the changed", "{ \"expression\": [ \"allof\", [\"type\", \"f\"], [\"not\", \"empty\"], [ \"anyof\", [\"suffix\", \"pyre_configuration.local\"], [\"suffix\",", "changed. Stopping pyre server.\") stop.Stop( self.arguments, self.configuration, self.analysis_directory ).run() else: LOG.info( \"None of", "import os from logging import Logger from typing import Any, Dict, List from", "server.\") stop.Stop( self.arguments, self.configuration, self.analysis_directory ).run() else: LOG.info( \"None of the changed paths", "None: super(ConfigurationMonitor, self).__init__(configuration, analysis_directory) self.arguments = arguments self.configuration = configuration self.analysis_directory = analysis_directory", "get pid from .pyre/configuration_monitor/configuration_monitor.pid ; kill <pid>. \"\"\" def __init__( self, arguments, configuration,", "LOG.info( \"None of the changed paths correspond to the current local\" \"configuration.\" )", "in the # LICENSE file in the root directory of this source tree.", "CONFIGURATION_FILE from .filesystem import is_parent from .watchman_subscriber import Subscription, WatchmanSubscriber LOG: Logger =", "_subscriptions(self) -> List[Subscription]: roots = self._watchman_client.query(\"watch-list\")[\"roots\"] names = [\"pyre_monitor_{}\".format(os.path.basename(root)) for root in roots]", "ConfigurationMonitor watches only for .pyre_configuration(.local) files, and will kill the corresponding server and", "import is_parent from .watchman_subscriber import Subscription, WatchmanSubscriber LOG: Logger = logging.getLogger(__name__) class ConfigurationMonitor(WatchmanSubscriber):", "os.path.join(response[\"root\"], \",\".join(response[\"files\"])), ) absolute_path = [ os.path.join(response[\"root\"], file) for file in response[\"files\"] ]", "and stop them. else: LOG.info(\"None of the changed paths correspond to pyre configuration.\")", "def __init__( self, arguments, configuration, analysis_directory: AnalysisDirectory ) -> None: super(ConfigurationMonitor, self).__init__(configuration, analysis_directory)", "Any]) -> None: LOG.info( \"Update to configuration at %s\", os.path.join(response[\"root\"], \",\".join(response[\"files\"])), ) absolute_path", "], ], \"fields\": [\"name\"], } return [ Subscription(root, name, subscription) for (root, name)", "Stopping pyre server.\") stop.Stop( self.arguments, self.configuration, self.analysis_directory ).run() else: LOG.info( \"None of the", "@property def _name(self) -> str: return \"configuration_monitor\" @property def _subscriptions(self) -> List[Subscription]: roots", ") -> None: super(ConfigurationMonitor, self).__init__(configuration, analysis_directory) self.arguments = arguments self.configuration = configuration self.analysis_directory", "], \"fields\": [\"name\"], } return [ Subscription(root, name, subscription) for (root, name) in", "file in the root directory of this source tree. 
# pyre-unsafe import logging", "Subscription(root, name, subscription) for (root, name) in zip(roots, names) ] def _handle_response(self, response:", "TODO(T54088045): Find all local pyre servers running underneath # and stop them. else:", "[\"not\", \"empty\"], [ \"anyof\", [\"suffix\", \"pyre_configuration.local\"], [\"suffix\", \"pyre_configuration\"], ], ], \"fields\": [\"name\"], }", "under the MIT license found in the # LICENSE file in the root", "kill a monitor, get pid from .pyre/configuration_monitor/configuration_monitor.pid ; kill <pid>. \"\"\" def __init__(", "= analysis_directory @property def _name(self) -> str: return \"configuration_monitor\" @property def _subscriptions(self) ->", "this source tree. # pyre-unsafe import logging import os from logging import Logger", "\"empty\"], [ \"anyof\", [\"suffix\", \"pyre_configuration.local\"], [\"suffix\", \"pyre_configuration\"], ], ], \"fields\": [\"name\"], } return", "of changed configuration files. project_configuration = os.path.join(response[\"root\"], CONFIGURATION_FILE) if any((project_configuration == file) for", "-> None: LOG.info( \"Update to configuration at %s\", os.path.join(response[\"root\"], \",\".join(response[\"files\"])), ) absolute_path =", "pyre servers running underneath # and stop them. else: LOG.info(\"None of the changed", "pyre-unsafe import logging import os from logging import Logger from typing import Any,", ").run() else: LOG.info( \"None of the changed paths correspond to the current local\"", "changed. Stopping pyre server.\") stop.Stop(self.arguments, self.configuration, self.analysis_directory).run() # TODO(T54088045): Find all local pyre", "of this source tree. # pyre-unsafe import logging import os from logging import", "To kill a monitor, get pid from .pyre/configuration_monitor/configuration_monitor.pid ; kill <pid>. \"\"\" def", "when a configuration changes. Logs are found in .pyre/configuration_monitor/configuration_monitor.log To kill a monitor,", "a monitor, get pid from .pyre/configuration_monitor/configuration_monitor.pid ; kill <pid>. \"\"\" def __init__( self,", "Find all local pyre servers running underneath # and stop them. else: LOG.info(\"None", "= logging.getLogger(__name__) class ConfigurationMonitor(WatchmanSubscriber): \"\"\" The ConfigurationMonitor watches only for .pyre_configuration(.local) files, and", "only for .pyre_configuration(.local) files, and will kill the corresponding server and ProjectFileMonitor when", "for (root, name) in zip(roots, names) ] def _handle_response(self, response: Dict[str, Any]) ->", "typing import Any, Dict, List from .analysis_directory import AnalysisDirectory from .commands import stop", "changed configuration files. project_configuration = os.path.join(response[\"root\"], CONFIGURATION_FILE) if any((project_configuration == file) for file", "arguments self.configuration = configuration self.analysis_directory = analysis_directory @property def _name(self) -> str: return", "if any( is_parent(self.arguments.local_configuration, file) for file in absolute_path ): LOG.info(\"Local configuration changed. Stopping", "monitor, get pid from .pyre/configuration_monitor/configuration_monitor.pid ; kill <pid>. 
\"\"\" def __init__( self, arguments,", "[ Subscription(root, name, subscription) for (root, name) in zip(roots, names) ] def _handle_response(self,", "the MIT license found in the # LICENSE file in the root directory", "import CONFIGURATION_FILE from .filesystem import is_parent from .watchman_subscriber import Subscription, WatchmanSubscriber LOG: Logger", "def _name(self) -> str: return \"configuration_monitor\" @property def _subscriptions(self) -> List[Subscription]: roots =", "@property def _subscriptions(self) -> List[Subscription]: roots = self._watchman_client.query(\"watch-list\")[\"roots\"] names = [\"pyre_monitor_{}\".format(os.path.basename(root)) for root", "= self._watchman_client.query(\"watch-list\")[\"roots\"] names = [\"pyre_monitor_{}\".format(os.path.basename(root)) for root in roots] subscription = { \"expression\":", "response: Dict[str, Any]) -> None: LOG.info( \"Update to configuration at %s\", os.path.join(response[\"root\"], \",\".join(response[\"files\"])),", "absolute_path): LOG.info(\"Pyre configuration changed. Stopping pyre server.\") stop.Stop(self.arguments, self.configuration, self.analysis_directory).run() # TODO(T54088045): Find", "LOG.info(\"Local configuration changed. Stopping pyre server.\") stop.Stop( self.arguments, self.configuration, self.analysis_directory ).run() else: LOG.info(", "absolute_path = [ os.path.join(response[\"root\"], file) for file in response[\"files\"] ] # Find the", "is_parent from .watchman_subscriber import Subscription, WatchmanSubscriber LOG: Logger = logging.getLogger(__name__) class ConfigurationMonitor(WatchmanSubscriber): \"\"\"", "compare it with the # list of changed configuration files. project_configuration = os.path.join(response[\"root\"],", "(c) 2016-present, Facebook, Inc. # # This source code is licensed under the", "from .commands import stop from .configuration import CONFIGURATION_FILE from .filesystem import is_parent from", "from .configuration import CONFIGURATION_FILE from .filesystem import is_parent from .watchman_subscriber import Subscription, WatchmanSubscriber", "\"expression\": [ \"allof\", [\"type\", \"f\"], [\"not\", \"empty\"], [ \"anyof\", [\"suffix\", \"pyre_configuration.local\"], [\"suffix\", \"pyre_configuration\"],", "): LOG.info(\"Local configuration changed. Stopping pyre server.\") stop.Stop( self.arguments, self.configuration, self.analysis_directory ).run() else:", "} return [ Subscription(root, name, subscription) for (root, name) in zip(roots, names) ]", "import Logger from typing import Any, Dict, List from .analysis_directory import AnalysisDirectory from", "code is licensed under the MIT license found in the # LICENSE file", "LICENSE file in the root directory of this source tree. 
# pyre-unsafe import", "] def _handle_response(self, response: Dict[str, Any]) -> None: LOG.info( \"Update to configuration at", "import AnalysisDirectory from .commands import stop from .configuration import CONFIGURATION_FILE from .filesystem import", "-> None: super(ConfigurationMonitor, self).__init__(configuration, analysis_directory) self.arguments = arguments self.configuration = configuration self.analysis_directory =", "to configuration at %s\", os.path.join(response[\"root\"], \",\".join(response[\"files\"])), ) absolute_path = [ os.path.join(response[\"root\"], file) for", "Dict[str, Any]) -> None: LOG.info( \"Update to configuration at %s\", os.path.join(response[\"root\"], \",\".join(response[\"files\"])), )", "is licensed under the MIT license found in the # LICENSE file in", "# Find the path to the project configuration file and compare it with", "absolute_path ): LOG.info(\"Local configuration changed. Stopping pyre server.\") stop.Stop( self.arguments, self.configuration, self.analysis_directory ).run()", "[ \"anyof\", [\"suffix\", \"pyre_configuration.local\"], [\"suffix\", \"pyre_configuration\"], ], ], \"fields\": [\"name\"], } return [", "WatchmanSubscriber LOG: Logger = logging.getLogger(__name__) class ConfigurationMonitor(WatchmanSubscriber): \"\"\" The ConfigurationMonitor watches only for", "def _handle_response(self, response: Dict[str, Any]) -> None: LOG.info( \"Update to configuration at %s\",", "else: LOG.info(\"None of the changed paths correspond to pyre configuration.\") if self.arguments.local_configuration: if", "a configuration changes. Logs are found in .pyre/configuration_monitor/configuration_monitor.log To kill a monitor, get", "file and compare it with the # list of changed configuration files. project_configuration", "name) in zip(roots, names) ] def _handle_response(self, response: Dict[str, Any]) -> None: LOG.info(", "pyre configuration.\") if self.arguments.local_configuration: if any( is_parent(self.arguments.local_configuration, file) for file in absolute_path ):", "Inc. # # This source code is licensed under the MIT license found", "source code is licensed under the MIT license found in the # LICENSE", "configuration at %s\", os.path.join(response[\"root\"], \",\".join(response[\"files\"])), ) absolute_path = [ os.path.join(response[\"root\"], file) for file", "= [\"pyre_monitor_{}\".format(os.path.basename(root)) for root in roots] subscription = { \"expression\": [ \"allof\", [\"type\",", "it with the # list of changed configuration files. project_configuration = os.path.join(response[\"root\"], CONFIGURATION_FILE)", "LOG.info(\"Pyre configuration changed. Stopping pyre server.\") stop.Stop(self.arguments, self.configuration, self.analysis_directory).run() # TODO(T54088045): Find all", "\"\"\" The ConfigurationMonitor watches only for .pyre_configuration(.local) files, and will kill the corresponding", "list of changed configuration files. project_configuration = os.path.join(response[\"root\"], CONFIGURATION_FILE) if any((project_configuration == file)", "# pyre-unsafe import logging import os from logging import Logger from typing import", "for file in absolute_path): LOG.info(\"Pyre configuration changed. Stopping pyre server.\") stop.Stop(self.arguments, self.configuration, self.analysis_directory).run()", ".pyre/configuration_monitor/configuration_monitor.log To kill a monitor, get pid from .pyre/configuration_monitor/configuration_monitor.pid ; kill <pid>. 
\"\"\"", "Logs are found in .pyre/configuration_monitor/configuration_monitor.log To kill a monitor, get pid from .pyre/configuration_monitor/configuration_monitor.pid", "\"f\"], [\"not\", \"empty\"], [ \"anyof\", [\"suffix\", \"pyre_configuration.local\"], [\"suffix\", \"pyre_configuration\"], ], ], \"fields\": [\"name\"],", "= arguments self.configuration = configuration self.analysis_directory = analysis_directory @property def _name(self) -> str:", "[\"name\"], } return [ Subscription(root, name, subscription) for (root, name) in zip(roots, names)", "logging import os from logging import Logger from typing import Any, Dict, List", "self, arguments, configuration, analysis_directory: AnalysisDirectory ) -> None: super(ConfigurationMonitor, self).__init__(configuration, analysis_directory) self.arguments =", "subscription = { \"expression\": [ \"allof\", [\"type\", \"f\"], [\"not\", \"empty\"], [ \"anyof\", [\"suffix\",", "LOG.info(\"None of the changed paths correspond to pyre configuration.\") if self.arguments.local_configuration: if any(", "= { \"expression\": [ \"allof\", [\"type\", \"f\"], [\"not\", \"empty\"], [ \"anyof\", [\"suffix\", \"pyre_configuration.local\"],", "names) ] def _handle_response(self, response: Dict[str, Any]) -> None: LOG.info( \"Update to configuration", "from .watchman_subscriber import Subscription, WatchmanSubscriber LOG: Logger = logging.getLogger(__name__) class ConfigurationMonitor(WatchmanSubscriber): \"\"\" The", "self.arguments, self.configuration, self.analysis_directory ).run() else: LOG.info( \"None of the changed paths correspond to", "# LICENSE file in the root directory of this source tree. # pyre-unsafe", "self.configuration, self.analysis_directory).run() # TODO(T54088045): Find all local pyre servers running underneath # and", "from logging import Logger from typing import Any, Dict, List from .analysis_directory import", "Logger = logging.getLogger(__name__) class ConfigurationMonitor(WatchmanSubscriber): \"\"\" The ConfigurationMonitor watches only for .pyre_configuration(.local) files,", "Logger from typing import Any, Dict, List from .analysis_directory import AnalysisDirectory from .commands", "\"\"\" def __init__( self, arguments, configuration, analysis_directory: AnalysisDirectory ) -> None: super(ConfigurationMonitor, self).__init__(configuration,", ".filesystem import is_parent from .watchman_subscriber import Subscription, WatchmanSubscriber LOG: Logger = logging.getLogger(__name__) class", "project configuration file and compare it with the # list of changed configuration", "None: LOG.info( \"Update to configuration at %s\", os.path.join(response[\"root\"], \",\".join(response[\"files\"])), ) absolute_path = [", "\"allof\", [\"type\", \"f\"], [\"not\", \"empty\"], [ \"anyof\", [\"suffix\", \"pyre_configuration.local\"], [\"suffix\", \"pyre_configuration\"], ], ],", "self.arguments = arguments self.configuration = configuration self.analysis_directory = analysis_directory @property def _name(self) ->", "roots] subscription = { \"expression\": [ \"allof\", [\"type\", \"f\"], [\"not\", \"empty\"], [ \"anyof\",", "changes. Logs are found in .pyre/configuration_monitor/configuration_monitor.log To kill a monitor, get pid from", "file in absolute_path): LOG.info(\"Pyre configuration changed. Stopping pyre server.\") stop.Stop(self.arguments, self.configuration, self.analysis_directory).run() #", "<gh_stars>1-10 # Copyright (c) 2016-present, Facebook, Inc. 
# # This source code is", "roots = self._watchman_client.query(\"watch-list\")[\"roots\"] names = [\"pyre_monitor_{}\".format(os.path.basename(root)) for root in roots] subscription = {", "running underneath # and stop them. else: LOG.info(\"None of the changed paths correspond", "[\"type\", \"f\"], [\"not\", \"empty\"], [ \"anyof\", [\"suffix\", \"pyre_configuration.local\"], [\"suffix\", \"pyre_configuration\"], ], ], \"fields\":", "self.analysis_directory = analysis_directory @property def _name(self) -> str: return \"configuration_monitor\" @property def _subscriptions(self)", "from .filesystem import is_parent from .watchman_subscriber import Subscription, WatchmanSubscriber LOG: Logger = logging.getLogger(__name__)", "self._watchman_client.query(\"watch-list\")[\"roots\"] names = [\"pyre_monitor_{}\".format(os.path.basename(root)) for root in roots] subscription = { \"expression\": [", "_handle_response(self, response: Dict[str, Any]) -> None: LOG.info( \"Update to configuration at %s\", os.path.join(response[\"root\"],", "\"fields\": [\"name\"], } return [ Subscription(root, name, subscription) for (root, name) in zip(roots,", "in absolute_path): LOG.info(\"Pyre configuration changed. Stopping pyre server.\") stop.Stop(self.arguments, self.configuration, self.analysis_directory).run() # TODO(T54088045):", "configuration changed. Stopping pyre server.\") stop.Stop(self.arguments, self.configuration, self.analysis_directory).run() # TODO(T54088045): Find all local", "\"Update to configuration at %s\", os.path.join(response[\"root\"], \",\".join(response[\"files\"])), ) absolute_path = [ os.path.join(response[\"root\"], file)", "underneath # and stop them. else: LOG.info(\"None of the changed paths correspond to", "with the # list of changed configuration files. project_configuration = os.path.join(response[\"root\"], CONFIGURATION_FILE) if", "analysis_directory) self.arguments = arguments self.configuration = configuration self.analysis_directory = analysis_directory @property def _name(self)", "analysis_directory @property def _name(self) -> str: return \"configuration_monitor\" @property def _subscriptions(self) -> List[Subscription]:", "at %s\", os.path.join(response[\"root\"], \",\".join(response[\"files\"])), ) absolute_path = [ os.path.join(response[\"root\"], file) for file in", ".pyre/configuration_monitor/configuration_monitor.pid ; kill <pid>. \"\"\" def __init__( self, arguments, configuration, analysis_directory: AnalysisDirectory )", "configuration self.analysis_directory = analysis_directory @property def _name(self) -> str: return \"configuration_monitor\" @property def", "# This source code is licensed under the MIT license found in the", "in the root directory of this source tree. # pyre-unsafe import logging import", ".watchman_subscriber import Subscription, WatchmanSubscriber LOG: Logger = logging.getLogger(__name__) class ConfigurationMonitor(WatchmanSubscriber): \"\"\" The ConfigurationMonitor", "(root, name) in zip(roots, names) ] def _handle_response(self, response: Dict[str, Any]) -> None:", "for file in response[\"files\"] ] # Find the path to the project configuration", "in response[\"files\"] ] # Find the path to the project configuration file and", "and compare it with the # list of changed configuration files. project_configuration =", "files. 
project_configuration = os.path.join(response[\"root\"], CONFIGURATION_FILE) if any((project_configuration == file) for file in absolute_path):", "= os.path.join(response[\"root\"], CONFIGURATION_FILE) if any((project_configuration == file) for file in absolute_path): LOG.info(\"Pyre configuration", "will kill the corresponding server and ProjectFileMonitor when a configuration changes. Logs are", "configuration.\") if self.arguments.local_configuration: if any( is_parent(self.arguments.local_configuration, file) for file in absolute_path ): LOG.info(\"Local", "stop.Stop( self.arguments, self.configuration, self.analysis_directory ).run() else: LOG.info( \"None of the changed paths correspond", "return [ Subscription(root, name, subscription) for (root, name) in zip(roots, names) ] def", "2016-present, Facebook, Inc. # # This source code is licensed under the MIT", "\"configuration_monitor\" @property def _subscriptions(self) -> List[Subscription]: roots = self._watchman_client.query(\"watch-list\")[\"roots\"] names = [\"pyre_monitor_{}\".format(os.path.basename(root)) for", "them. else: LOG.info(\"None of the changed paths correspond to pyre configuration.\") if self.arguments.local_configuration:", "any( is_parent(self.arguments.local_configuration, file) for file in absolute_path ): LOG.info(\"Local configuration changed. Stopping pyre", "arguments, configuration, analysis_directory: AnalysisDirectory ) -> None: super(ConfigurationMonitor, self).__init__(configuration, analysis_directory) self.arguments = arguments", "] # Find the path to the project configuration file and compare it", "ConfigurationMonitor(WatchmanSubscriber): \"\"\" The ConfigurationMonitor watches only for .pyre_configuration(.local) files, and will kill the", "self.analysis_directory ).run() else: LOG.info( \"None of the changed paths correspond to the current", "the corresponding server and ProjectFileMonitor when a configuration changes. Logs are found in", "correspond to pyre configuration.\") if self.arguments.local_configuration: if any( is_parent(self.arguments.local_configuration, file) for file in", "[\"suffix\", \"pyre_configuration.local\"], [\"suffix\", \"pyre_configuration\"], ], ], \"fields\": [\"name\"], } return [ Subscription(root, name,", "import logging import os from logging import Logger from typing import Any, Dict,", "os from logging import Logger from typing import Any, Dict, List from .analysis_directory" ]
[ "i[0] not in all_place: all_place.append(i[0]) # 全部班次的城市的集合 cursor.execute(\"select * from map\") # 城市位置:经纬坐标", "from time_table where Tran=? ORDER BY RANDOM() limit 30\", ('火车',)) # 30班次的火车 time_table_values", "'深圳': 0.2, '郑州': 0.5, '西安': 0.2} # 在城市停留的风险值 trans_dangers = {\"汽车\": 2, \"火车\":", "0.2, '郑州': 0.5, '西安': 0.2} # 在城市停留的风险值 trans_dangers = {\"汽车\": 2, \"火车\": 5,", "= [] # 所有航班里的城市 def load_data(cursor): # 从数据库加载数据 global time_table_values, map_values, map_geo, all_place", "where Tran=? ORDER BY RANDOM() limit 30\", ('火车',)) # 30班次的火车 time_table_values = cursor.fetchall()", "in tmp: time_table_values.append(i) for i in time_table_values: if i[0] not in all_place: all_place.append(i[0])", "in time_table_values: if i[0] not in all_place: all_place.append(i[0]) # 全部班次的城市的集合 cursor.execute(\"select * from", "0.2, '广州': 0.5, '武汉': 0.9, '上海': 0.9, '重庆': 0.2, '青岛': 0.9, '深圳': 0.2,", "tmp = cursor.fetchall() for i in tmp: time_table_values.append(i) for i in time_table_values: if", "= cursor.fetchall() for i in tmp: time_table_values.append(i) cursor.execute(\"select * from time_table where Tran!=?", "# 城市位置:经纬坐标 map_values = cursor.fetchall() for i in map_values: if i[0] in all_place:", "all_place: all_place.append(i[0]) # 全部班次的城市的集合 cursor.execute(\"select * from map\") # 城市位置:经纬坐标 map_values = cursor.fetchall()", "'北京': 0.9, '成都': 0.5, '杭州': 0.2, '广州': 0.5, '武汉': 0.9, '上海': 0.9, '重庆':", "ORDER BY RANDOM() limit 30\", ('飞机',)) # 10班次的飞机 tmp = cursor.fetchall() for i", "0.5, '西安': 0.2} # 在城市停留的风险值 trans_dangers = {\"汽车\": 2, \"火车\": 5, \"飞机\": 9}", "time_table_values.append(i) for i in time_table_values: if i[0] not in all_place: all_place.append(i[0]) # 全部班次的城市的集合", "<filename>Data.py city_dangers = {'南京': 0.5, '北京': 0.9, '成都': 0.5, '杭州': 0.2, '广州': 0.5,", "map_geo, all_place # 定义变量为全局变量,实现在函数内部改变变量值 cursor.execute(\"select * from time_table where Tran=? ORDER BY RANDOM()", "limit 30\", ('火车',)) # 30班次的火车 time_table_values = cursor.fetchall() cursor.execute(\"select * from time_table where", "i in tmp: time_table_values.append(i) for i in time_table_values: if i[0] not in all_place:", "cursor.execute(\"select * from map\") # 城市位置:经纬坐标 map_values = cursor.fetchall() for i in map_values:", "0.9, '上海': 0.9, '重庆': 0.2, '青岛': 0.9, '深圳': 0.2, '郑州': 0.5, '西安': 0.2}", "Tran!=? 
and Tran!=?\", ('飞机', '火车')) # 所有汽车班次 tmp = cursor.fetchall() for i in", "= {'南京': 0.5, '北京': 0.9, '成都': 0.5, '杭州': 0.2, '广州': 0.5, '武汉': 0.9,", "time_table_values = [] # 所有班次时间表 map_values = [] # 所有城市的经纬度信息 map_geo = {}", "'武汉': 0.9, '上海': 0.9, '重庆': 0.2, '青岛': 0.9, '深圳': 0.2, '郑州': 0.5, '西安':", "# 全部班次的城市的集合 cursor.execute(\"select * from map\") # 城市位置:经纬坐标 map_values = cursor.fetchall() for i", "# 交通工具的风险值 time_table_values = [] # 所有班次时间表 map_values = [] # 所有城市的经纬度信息 map_geo", "0.2, '青岛': 0.9, '深圳': 0.2, '郑州': 0.5, '西安': 0.2} # 在城市停留的风险值 trans_dangers =", "'广州': 0.5, '武汉': 0.9, '上海': 0.9, '重庆': 0.2, '青岛': 0.9, '深圳': 0.2, '郑州':", "not in all_place: all_place.append(i[0]) # 全部班次的城市的集合 cursor.execute(\"select * from map\") # 城市位置:经纬坐标 map_values", "all_place = [] # 所有航班里的城市 def load_data(cursor): # 从数据库加载数据 global time_table_values, map_values, map_geo,", "所有班次时间表 map_values = [] # 所有城市的经纬度信息 map_geo = {} # 所有城市的经纬度,字典性的结构 all_place =", "for i in map_values: if i[0] in all_place: map_geo[i[0]] = [i[1], i[2]] print(map_geo)", "9} # 交通工具的风险值 time_table_values = [] # 所有班次时间表 map_values = [] # 所有城市的经纬度信息", "def load_data(cursor): # 从数据库加载数据 global time_table_values, map_values, map_geo, all_place # 定义变量为全局变量,实现在函数内部改变变量值 cursor.execute(\"select *", "trans_dangers = {\"汽车\": 2, \"火车\": 5, \"飞机\": 9} # 交通工具的风险值 time_table_values = []", "Tran=? ORDER BY RANDOM() limit 30\", ('飞机',)) # 10班次的飞机 tmp = cursor.fetchall() for", "= cursor.fetchall() cursor.execute(\"select * from time_table where Tran=? ORDER BY RANDOM() limit 30\",", "map_values, map_geo, all_place # 定义变量为全局变量,实现在函数内部改变变量值 cursor.execute(\"select * from time_table where Tran=? ORDER BY", "Tran!=?\", ('飞机', '火车')) # 所有汽车班次 tmp = cursor.fetchall() for i in tmp: time_table_values.append(i)", "所有汽车班次 tmp = cursor.fetchall() for i in tmp: time_table_values.append(i) for i in time_table_values:", "# 所有汽车班次 tmp = cursor.fetchall() for i in tmp: time_table_values.append(i) for i in", "# 在城市停留的风险值 trans_dangers = {\"汽车\": 2, \"火车\": 5, \"飞机\": 9} # 交通工具的风险值 time_table_values", "{} # 所有城市的经纬度,字典性的结构 all_place = [] # 所有航班里的城市 def load_data(cursor): # 从数据库加载数据 global", "from time_table where Tran!=? and Tran!=?\", ('飞机', '火车')) # 所有汽车班次 tmp = cursor.fetchall()", "for i in tmp: time_table_values.append(i) for i in time_table_values: if i[0] not in", "cursor.fetchall() for i in tmp: time_table_values.append(i) cursor.execute(\"select * from time_table where Tran!=? and", "0.9, '重庆': 0.2, '青岛': 0.9, '深圳': 0.2, '郑州': 0.5, '西安': 0.2} # 在城市停留的风险值", "= cursor.fetchall() for i in map_values: if i[0] in all_place: map_geo[i[0]] = [i[1],", "[] # 所有班次时间表 map_values = [] # 所有城市的经纬度信息 map_geo = {} # 所有城市的经纬度,字典性的结构", "城市位置:经纬坐标 map_values = cursor.fetchall() for i in map_values: if i[0] in all_place: map_geo[i[0]]", "cursor.execute(\"select * from time_table where Tran=? ORDER BY RANDOM() limit 30\", ('火车',)) #", "all_place.append(i[0]) # 全部班次的城市的集合 cursor.execute(\"select * from map\") # 城市位置:经纬坐标 map_values = cursor.fetchall() for", "* from time_table where Tran=? ORDER BY RANDOM() limit 30\", ('火车',)) # 30班次的火车", "('火车',)) # 30班次的火车 time_table_values = cursor.fetchall() cursor.execute(\"select * from time_table where Tran=? 
ORDER", "0.5, '武汉': 0.9, '上海': 0.9, '重庆': 0.2, '青岛': 0.9, '深圳': 0.2, '郑州': 0.5,", "所有城市的经纬度信息 map_geo = {} # 所有城市的经纬度,字典性的结构 all_place = [] # 所有航班里的城市 def load_data(cursor):", "and Tran!=?\", ('飞机', '火车')) # 所有汽车班次 tmp = cursor.fetchall() for i in tmp:", "for i in time_table_values: if i[0] not in all_place: all_place.append(i[0]) # 全部班次的城市的集合 cursor.execute(\"select", "BY RANDOM() limit 30\", ('火车',)) # 30班次的火车 time_table_values = cursor.fetchall() cursor.execute(\"select * from", "where Tran=? ORDER BY RANDOM() limit 30\", ('飞机',)) # 10班次的飞机 tmp = cursor.fetchall()", "5, \"飞机\": 9} # 交通工具的风险值 time_table_values = [] # 所有班次时间表 map_values = []", "2, \"火车\": 5, \"飞机\": 9} # 交通工具的风险值 time_table_values = [] # 所有班次时间表 map_values", "= {\"汽车\": 2, \"火车\": 5, \"飞机\": 9} # 交通工具的风险值 time_table_values = [] #", "{\"汽车\": 2, \"火车\": 5, \"飞机\": 9} # 交通工具的风险值 time_table_values = [] # 所有班次时间表", "'杭州': 0.2, '广州': 0.5, '武汉': 0.9, '上海': 0.9, '重庆': 0.2, '青岛': 0.9, '深圳':", "'郑州': 0.5, '西安': 0.2} # 在城市停留的风险值 trans_dangers = {\"汽车\": 2, \"火车\": 5, \"飞机\":", "i in time_table_values: if i[0] not in all_place: all_place.append(i[0]) # 全部班次的城市的集合 cursor.execute(\"select *", "# 10班次的飞机 tmp = cursor.fetchall() for i in tmp: time_table_values.append(i) cursor.execute(\"select * from", "全部班次的城市的集合 cursor.execute(\"select * from map\") # 城市位置:经纬坐标 map_values = cursor.fetchall() for i in", "# 所有班次时间表 map_values = [] # 所有城市的经纬度信息 map_geo = {} # 所有城市的经纬度,字典性的结构 all_place", "[] # 所有航班里的城市 def load_data(cursor): # 从数据库加载数据 global time_table_values, map_values, map_geo, all_place #", "time_table where Tran=? ORDER BY RANDOM() limit 30\", ('火车',)) # 30班次的火车 time_table_values =", "ORDER BY RANDOM() limit 30\", ('火车',)) # 30班次的火车 time_table_values = cursor.fetchall() cursor.execute(\"select *", "Tran=? ORDER BY RANDOM() limit 30\", ('火车',)) # 30班次的火车 time_table_values = cursor.fetchall() cursor.execute(\"select", "map_geo = {} # 所有城市的经纬度,字典性的结构 all_place = [] # 所有航班里的城市 def load_data(cursor): #", "10班次的飞机 tmp = cursor.fetchall() for i in tmp: time_table_values.append(i) cursor.execute(\"select * from time_table", "# 从数据库加载数据 global time_table_values, map_values, map_geo, all_place # 定义变量为全局变量,实现在函数内部改变变量值 cursor.execute(\"select * from time_table", "time_table where Tran!=? and Tran!=?\", ('飞机', '火车')) # 所有汽车班次 tmp = cursor.fetchall() for", "= cursor.fetchall() for i in tmp: time_table_values.append(i) for i in time_table_values: if i[0]", "0.2} # 在城市停留的风险值 trans_dangers = {\"汽车\": 2, \"火车\": 5, \"飞机\": 9} # 交通工具的风险值", "map\") # 城市位置:经纬坐标 map_values = cursor.fetchall() for i in map_values: if i[0] in", "0.9, '深圳': 0.2, '郑州': 0.5, '西安': 0.2} # 在城市停留的风险值 trans_dangers = {\"汽车\": 2,", "in tmp: time_table_values.append(i) cursor.execute(\"select * from time_table where Tran!=? and Tran!=?\", ('飞机', '火车'))", "'重庆': 0.2, '青岛': 0.9, '深圳': 0.2, '郑州': 0.5, '西安': 0.2} # 在城市停留的风险值 trans_dangers", "if i[0] not in all_place: all_place.append(i[0]) # 全部班次的城市的集合 cursor.execute(\"select * from map\") #", "('飞机',)) # 10班次的飞机 tmp = cursor.fetchall() for i in tmp: time_table_values.append(i) cursor.execute(\"select *", "* from time_table where Tran!=? and Tran!=?\", ('飞机', '火车')) # 所有汽车班次 tmp =", "time_table_values = cursor.fetchall() cursor.execute(\"select * from time_table where Tran=? 
ORDER BY RANDOM() limit", "cursor.fetchall() for i in tmp: time_table_values.append(i) for i in time_table_values: if i[0] not", "= [] # 所有班次时间表 map_values = [] # 所有城市的经纬度信息 map_geo = {} #", "tmp = cursor.fetchall() for i in tmp: time_table_values.append(i) cursor.execute(\"select * from time_table where", "city_dangers = {'南京': 0.5, '北京': 0.9, '成都': 0.5, '杭州': 0.2, '广州': 0.5, '武汉':", "定义变量为全局变量,实现在函数内部改变变量值 cursor.execute(\"select * from time_table where Tran=? ORDER BY RANDOM() limit 30\", ('火车',))", "where Tran!=? and Tran!=?\", ('飞机', '火车')) # 所有汽车班次 tmp = cursor.fetchall() for i", "for i in tmp: time_table_values.append(i) cursor.execute(\"select * from time_table where Tran!=? and Tran!=?\",", "time_table_values, map_values, map_geo, all_place # 定义变量为全局变量,实现在函数内部改变变量值 cursor.execute(\"select * from time_table where Tran=? ORDER", "= {} # 所有城市的经纬度,字典性的结构 all_place = [] # 所有航班里的城市 def load_data(cursor): # 从数据库加载数据", "from map\") # 城市位置:经纬坐标 map_values = cursor.fetchall() for i in map_values: if i[0]", "map_values = [] # 所有城市的经纬度信息 map_geo = {} # 所有城市的经纬度,字典性的结构 all_place = []", "30\", ('火车',)) # 30班次的火车 time_table_values = cursor.fetchall() cursor.execute(\"select * from time_table where Tran=?", "('飞机', '火车')) # 所有汽车班次 tmp = cursor.fetchall() for i in tmp: time_table_values.append(i) for", "time_table where Tran=? ORDER BY RANDOM() limit 30\", ('飞机',)) # 10班次的飞机 tmp =", "[] # 所有城市的经纬度信息 map_geo = {} # 所有城市的经纬度,字典性的结构 all_place = [] # 所有航班里的城市", "交通工具的风险值 time_table_values = [] # 所有班次时间表 map_values = [] # 所有城市的经纬度信息 map_geo =", "tmp: time_table_values.append(i) cursor.execute(\"select * from time_table where Tran!=? and Tran!=?\", ('飞机', '火车')) #", "time_table_values: if i[0] not in all_place: all_place.append(i[0]) # 全部班次的城市的集合 cursor.execute(\"select * from map\")", "* from time_table where Tran=? ORDER BY RANDOM() limit 30\", ('飞机',)) # 10班次的飞机", "0.5, '北京': 0.9, '成都': 0.5, '杭州': 0.2, '广州': 0.5, '武汉': 0.9, '上海': 0.9,", "'青岛': 0.9, '深圳': 0.2, '郑州': 0.5, '西安': 0.2} # 在城市停留的风险值 trans_dangers = {\"汽车\":", "在城市停留的风险值 trans_dangers = {\"汽车\": 2, \"火车\": 5, \"飞机\": 9} # 交通工具的风险值 time_table_values =", "BY RANDOM() limit 30\", ('飞机',)) # 10班次的飞机 tmp = cursor.fetchall() for i in", "# 30班次的火车 time_table_values = cursor.fetchall() cursor.execute(\"select * from time_table where Tran=? ORDER BY", "tmp: time_table_values.append(i) for i in time_table_values: if i[0] not in all_place: all_place.append(i[0]) #", "所有城市的经纬度,字典性的结构 all_place = [] # 所有航班里的城市 def load_data(cursor): # 从数据库加载数据 global time_table_values, map_values,", "RANDOM() limit 30\", ('火车',)) # 30班次的火车 time_table_values = cursor.fetchall() cursor.execute(\"select * from time_table", "from time_table where Tran=? ORDER BY RANDOM() limit 30\", ('飞机',)) # 10班次的飞机 tmp", "cursor.fetchall() cursor.execute(\"select * from time_table where Tran=? ORDER BY RANDOM() limit 30\", ('飞机',))", "'西安': 0.2} # 在城市停留的风险值 trans_dangers = {\"汽车\": 2, \"火车\": 5, \"飞机\": 9} #", "{'南京': 0.5, '北京': 0.9, '成都': 0.5, '杭州': 0.2, '广州': 0.5, '武汉': 0.9, '上海':", "从数据库加载数据 global time_table_values, map_values, map_geo, all_place # 定义变量为全局变量,实现在函数内部改变变量值 cursor.execute(\"select * from time_table where", "# 定义变量为全局变量,实现在函数内部改变变量值 cursor.execute(\"select * from time_table where Tran=? ORDER BY RANDOM() limit 30\",", "cursor.execute(\"select * from time_table where Tran=? 
ORDER BY RANDOM() limit 30\", ('飞机',)) #", "'成都': 0.5, '杭州': 0.2, '广州': 0.5, '武汉': 0.9, '上海': 0.9, '重庆': 0.2, '青岛':", "limit 30\", ('飞机',)) # 10班次的飞机 tmp = cursor.fetchall() for i in tmp: time_table_values.append(i)", "0.5, '杭州': 0.2, '广州': 0.5, '武汉': 0.9, '上海': 0.9, '重庆': 0.2, '青岛': 0.9,", "i in tmp: time_table_values.append(i) cursor.execute(\"select * from time_table where Tran!=? and Tran!=?\", ('飞机',", "cursor.fetchall() for i in map_values: if i[0] in all_place: map_geo[i[0]] = [i[1], i[2]]", "所有航班里的城市 def load_data(cursor): # 从数据库加载数据 global time_table_values, map_values, map_geo, all_place # 定义变量为全局变量,实现在函数内部改变变量值 cursor.execute(\"select", "all_place # 定义变量为全局变量,实现在函数内部改变变量值 cursor.execute(\"select * from time_table where Tran=? ORDER BY RANDOM() limit", "in all_place: all_place.append(i[0]) # 全部班次的城市的集合 cursor.execute(\"select * from map\") # 城市位置:经纬坐标 map_values =", "\"飞机\": 9} # 交通工具的风险值 time_table_values = [] # 所有班次时间表 map_values = [] #", "= [] # 所有城市的经纬度信息 map_geo = {} # 所有城市的经纬度,字典性的结构 all_place = [] #", "30班次的火车 time_table_values = cursor.fetchall() cursor.execute(\"select * from time_table where Tran=? ORDER BY RANDOM()", "time_table_values.append(i) cursor.execute(\"select * from time_table where Tran!=? and Tran!=?\", ('飞机', '火车')) # 所有汽车班次", "global time_table_values, map_values, map_geo, all_place # 定义变量为全局变量,实现在函数内部改变变量值 cursor.execute(\"select * from time_table where Tran=?", "# 所有城市的经纬度,字典性的结构 all_place = [] # 所有航班里的城市 def load_data(cursor): # 从数据库加载数据 global time_table_values,", "'火车')) # 所有汽车班次 tmp = cursor.fetchall() for i in tmp: time_table_values.append(i) for i", "'上海': 0.9, '重庆': 0.2, '青岛': 0.9, '深圳': 0.2, '郑州': 0.5, '西安': 0.2} #", "\"火车\": 5, \"飞机\": 9} # 交通工具的风险值 time_table_values = [] # 所有班次时间表 map_values =", "RANDOM() limit 30\", ('飞机',)) # 10班次的飞机 tmp = cursor.fetchall() for i in tmp:", "0.9, '成都': 0.5, '杭州': 0.2, '广州': 0.5, '武汉': 0.9, '上海': 0.9, '重庆': 0.2,", "# 所有城市的经纬度信息 map_geo = {} # 所有城市的经纬度,字典性的结构 all_place = [] # 所有航班里的城市 def", "load_data(cursor): # 从数据库加载数据 global time_table_values, map_values, map_geo, all_place # 定义变量为全局变量,实现在函数内部改变变量值 cursor.execute(\"select * from", "* from map\") # 城市位置:经纬坐标 map_values = cursor.fetchall() for i in map_values: if", "map_values = cursor.fetchall() for i in map_values: if i[0] in all_place: map_geo[i[0]] =", "# 所有航班里的城市 def load_data(cursor): # 从数据库加载数据 global time_table_values, map_values, map_geo, all_place # 定义变量为全局变量,实现在函数内部改变变量值", "cursor.execute(\"select * from time_table where Tran!=? and Tran!=?\", ('飞机', '火车')) # 所有汽车班次 tmp", "30\", ('飞机',)) # 10班次的飞机 tmp = cursor.fetchall() for i in tmp: time_table_values.append(i) cursor.execute(\"select" ]
[ "k, v in value.items() } return value def list_to_dict(list_): \"\"\"Transform a list to", "a class from `config.models` to a dict.\"\"\" if hasattr(value, 'as_dict'): return value.as_dict() if", "to_dict(value): \"\"\"Recursively transform a class from `config.models` to a dict.\"\"\" if hasattr(value, 'as_dict'):", "list): return [ to_dict(v) for v in value ] if isinstance(value, dict): return", "'as_dict'): return value.as_dict() if isinstance(value, list): return [ to_dict(v) for v in value", "value ] if isinstance(value, dict): return { k: to_dict(v) for k, v in", "value.items() } return value def list_to_dict(list_): \"\"\"Transform a list to a dictionary with", "v in value ] if isinstance(value, dict): return { k: to_dict(v) for k,", "\"\"\"Recursively transform a class from `config.models` to a dict.\"\"\" if hasattr(value, 'as_dict'): return", "to a dict.\"\"\" if hasattr(value, 'as_dict'): return value.as_dict() if isinstance(value, list): return [", "dictionary with its indices as keys.\"\"\" dict_ = { str(i): element for i,", "return value.as_dict() if isinstance(value, list): return [ to_dict(v) for v in value ]", "its indices as keys.\"\"\" dict_ = { str(i): element for i, element in", "in value.items() } return value def list_to_dict(list_): \"\"\"Transform a list to a dictionary", "to_dict(v) for k, v in value.items() } return value def list_to_dict(list_): \"\"\"Transform a", "class from `config.models` to a dict.\"\"\" if hasattr(value, 'as_dict'): return value.as_dict() if isinstance(value,", "return [ to_dict(v) for v in value ] if isinstance(value, dict): return {", "hasattr(value, 'as_dict'): return value.as_dict() if isinstance(value, list): return [ to_dict(v) for v in", "list to a dictionary with its indices as keys.\"\"\" dict_ = { str(i):", "module.\"\"\" def to_dict(value): \"\"\"Recursively transform a class from `config.models` to a dict.\"\"\" if", "from `config.models` to a dict.\"\"\" if hasattr(value, 'as_dict'): return value.as_dict() if isinstance(value, list):", "} return value def list_to_dict(list_): \"\"\"Transform a list to a dictionary with its", "dict.\"\"\" if hasattr(value, 'as_dict'): return value.as_dict() if isinstance(value, list): return [ to_dict(v) for", "value def list_to_dict(list_): \"\"\"Transform a list to a dictionary with its indices as", "transform a class from `config.models` to a dict.\"\"\" if hasattr(value, 'as_dict'): return value.as_dict()", "`config.models` to a dict.\"\"\" if hasattr(value, 'as_dict'): return value.as_dict() if isinstance(value, list): return", "the config module.\"\"\" def to_dict(value): \"\"\"Recursively transform a class from `config.models` to a", "return value def list_to_dict(list_): \"\"\"Transform a list to a dictionary with its indices", "for v in value ] if isinstance(value, dict): return { k: to_dict(v) for", "if isinstance(value, dict): return { k: to_dict(v) for k, v in value.items() }", "a dict.\"\"\" if hasattr(value, 'as_dict'): return value.as_dict() if isinstance(value, list): return [ to_dict(v)", "dict): return { k: to_dict(v) for k, v in value.items() } return value", "for the config module.\"\"\" def to_dict(value): \"\"\"Recursively transform a class from `config.models` to", "functions for the config module.\"\"\" def to_dict(value): \"\"\"Recursively transform a class from `config.models`", "isinstance(value, list): return [ to_dict(v) for v in value ] if isinstance(value, dict):", "to_dict(v) for v in value ] if isinstance(value, dict): return { k: to_dict(v)", "for k, v in 
value.items() } return value def list_to_dict(list_): \"\"\"Transform a list", "a dictionary with its indices as keys.\"\"\" dict_ = { str(i): element for", "dict_ = { str(i): element for i, element in enumerate(list_) } return dict_", "{ k: to_dict(v) for k, v in value.items() } return value def list_to_dict(list_):", "def list_to_dict(list_): \"\"\"Transform a list to a dictionary with its indices as keys.\"\"\"", "isinstance(value, dict): return { k: to_dict(v) for k, v in value.items() } return", "def to_dict(value): \"\"\"Recursively transform a class from `config.models` to a dict.\"\"\" if hasattr(value,", "] if isinstance(value, dict): return { k: to_dict(v) for k, v in value.items()", "value.as_dict() if isinstance(value, list): return [ to_dict(v) for v in value ] if", "v in value.items() } return value def list_to_dict(list_): \"\"\"Transform a list to a", "to a dictionary with its indices as keys.\"\"\" dict_ = { str(i): element", "if isinstance(value, list): return [ to_dict(v) for v in value ] if isinstance(value,", "\"\"\"Shared functions for the config module.\"\"\" def to_dict(value): \"\"\"Recursively transform a class from", "[ to_dict(v) for v in value ] if isinstance(value, dict): return { k:", "\"\"\"Transform a list to a dictionary with its indices as keys.\"\"\" dict_ =", "k: to_dict(v) for k, v in value.items() } return value def list_to_dict(list_): \"\"\"Transform", "with its indices as keys.\"\"\" dict_ = { str(i): element for i, element", "keys.\"\"\" dict_ = { str(i): element for i, element in enumerate(list_) } return", "indices as keys.\"\"\" dict_ = { str(i): element for i, element in enumerate(list_)", "in value ] if isinstance(value, dict): return { k: to_dict(v) for k, v", "return { k: to_dict(v) for k, v in value.items() } return value def", "config module.\"\"\" def to_dict(value): \"\"\"Recursively transform a class from `config.models` to a dict.\"\"\"", "as keys.\"\"\" dict_ = { str(i): element for i, element in enumerate(list_) }", "list_to_dict(list_): \"\"\"Transform a list to a dictionary with its indices as keys.\"\"\" dict_", "a list to a dictionary with its indices as keys.\"\"\" dict_ = {", "if hasattr(value, 'as_dict'): return value.as_dict() if isinstance(value, list): return [ to_dict(v) for v" ]
[ "check(ba_name): content=s.get(url+ba_name,headers=headers).text return_list=rebuild.findall(content) if str(return_list)==\"['已签到']\": pass else: print (ba_name+\"-->Error\") def checkth(): for g", "qian_url=ba_url+ba_name s.get(qian_url,headers=headers) def go(): taske=[gevent.spawn(run,url,i) for i in ba_name_tuple] gevent.joinall(taske) rebuild=re.compile(r\"已签到\") def check(ba_name):", "ba_all_url=\"http://\"+str([i.attr(\"href\") for i in temp(\".my_love_bar\").children().items()][-1])[2:] retemp=re.compile(r\">\\w*</a>\") ba_name_list=[] for i in retemp.findall(requests.get(ba_all_url,headers={'Cookie':ba_cookie}).text)[1:-2]: ba_name_list.append(i[1:-4]) with", "'Accept-Language':'zh-CN,zh;q=0.8', 'Cookie':ba_cookie, 'Host':'tieba.baidu.com', 'Proxy-Connection':'keep-alive', 'Upgrade-Insecure-Requests':'1', 'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML,", "in ba_name_tuple: m=Thread(target=check,args=(g,)) m.start() def writeconfig(): temp=pq(requests.get(\"http://wapp.baidu.com/\",headers={'Cookie':ba_cookie}).content) ba_all_url=\"http://\"+str([i.attr(\"href\") for i in temp(\".my_love_bar\").children().items()][-1])[2:] retemp=re.compile(r\">\\w*</a>\")", "writeconfig(): temp=pq(requests.get(\"http://wapp.baidu.com/\",headers={'Cookie':ba_cookie}).content) ba_all_url=\"http://\"+str([i.attr(\"href\") for i in temp(\".my_love_bar\").children().items()][-1])[2:] retemp=re.compile(r\">\\w*</a>\") ba_name_list=[] for i in retemp.findall(requests.get(ba_all_url,headers={'Cookie':ba_cookie}).text)[1:-2]:", "as fob: fob.write(str(tuple(ba_name_list))) def checkconfig(): if \"qd_config.ini\" in os.listdir(os.getcwd()): pass else: writeconfig() def", "global ba_name_tuple with open(\"qd_config.ini\",\"r\",encoding=\"utf-8\") as fob: ba_name_tuple=eval(fob.read()) def serun(): checkconfig() readconfig() go() if", "if str(return_list)==\"['已签到']\": pass else: print (ba_name+\"-->Error\") def checkth(): for g in ba_name_tuple: m=Thread(target=check,args=(g,))", "ba_name_tuple with open(\"qd_config.ini\",\"r\",encoding=\"utf-8\") as fob: ba_name_tuple=eval(fob.read()) def serun(): checkconfig() readconfig() go() if __name__==\"__main__\":", "'Host':'tieba.baidu.com', 'Proxy-Connection':'keep-alive', 'Upgrade-Insecure-Requests':'1', 'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko)", "x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36' } s=requests.Session() def run(ba_url,ba_name): qian_url=ba_url+ba_name s.get(qian_url,headers=headers)", "in ba_name_tuple] gevent.joinall(taske) rebuild=re.compile(r\"已签到\") def check(ba_name): content=s.get(url+ba_name,headers=headers).text return_list=rebuild.findall(content) if str(return_list)==\"['已签到']\": pass else: print", "'Accept-Encoding':'gzip, deflate', 'Accept-Language':'zh-CN,zh;q=0.8', 'Cookie':ba_cookie, 'Host':'tieba.baidu.com', 'Proxy-Connection':'keep-alive', 'Upgrade-Insecure-Requests':'1', 'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64)", "schedule from pyquery import PyQuery as pq gevent.monkey.patch_socket() url=\"http://tieba.baidu.com/mo/q---F55A5B1F58548A7A5403ABA7602FEBAE%3AFG%3D1--1-1-0--2--wapp_1510665393192_464/sign?tbs=af62312bf49309c61510669752&fid=152744&kw=\" ba_cookie='把cookie放到这' headers={ 'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8', 'Accept-Encoding':'gzip,", "with open(\"qd_config.ini\",\"w+\",encoding=\"utf-8\") as fob: 
fob.write(str(tuple(ba_name_list))) def checkconfig(): if \"qd_config.ini\" in os.listdir(os.getcwd()): pass else:", "'Proxy-Connection':'keep-alive', 'Upgrade-Insecure-Requests':'1', 'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115", "retemp.findall(requests.get(ba_all_url,headers={'Cookie':ba_cookie}).text)[1:-2]: ba_name_list.append(i[1:-4]) with open(\"qd_config.ini\",\"w+\",encoding=\"utf-8\") as fob: fob.write(str(tuple(ba_name_list))) def checkconfig(): if \"qd_config.ini\" in os.listdir(os.getcwd()):", "Thread import schedule from pyquery import PyQuery as pq gevent.monkey.patch_socket() url=\"http://tieba.baidu.com/mo/q---F55A5B1F58548A7A5403ABA7602FEBAE%3AFG%3D1--1-1-0--2--wapp_1510665393192_464/sign?tbs=af62312bf49309c61510669752&fid=152744&kw=\" ba_cookie='把cookie放到这' headers={", "in temp(\".my_love_bar\").children().items()][-1])[2:] retemp=re.compile(r\">\\w*</a>\") ba_name_list=[] for i in retemp.findall(requests.get(ba_all_url,headers={'Cookie':ba_cookie}).text)[1:-2]: ba_name_list.append(i[1:-4]) with open(\"qd_config.ini\",\"w+\",encoding=\"utf-8\") as fob:", "ba_name_tuple=eval(fob.read()) def serun(): checkconfig() readconfig() go() if __name__==\"__main__\": schedule.every().day.at(\"00:10\").do(serun) #每天的签到时间 while 1: schedule.run_pending()", "pyquery import PyQuery as pq gevent.monkey.patch_socket() url=\"http://tieba.baidu.com/mo/q---F55A5B1F58548A7A5403ABA7602FEBAE%3AFG%3D1--1-1-0--2--wapp_1510665393192_464/sign?tbs=af62312bf49309c61510669752&fid=152744&kw=\" ba_cookie='把cookie放到这' headers={ 'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8', 'Accept-Encoding':'gzip, deflate', 'Accept-Language':'zh-CN,zh;q=0.8',", "open(\"qd_config.ini\",\"w+\",encoding=\"utf-8\") as fob: fob.write(str(tuple(ba_name_list))) def checkconfig(): if \"qd_config.ini\" in os.listdir(os.getcwd()): pass else: writeconfig()", "gevent.monkey.patch_socket() url=\"http://tieba.baidu.com/mo/q---F55A5B1F58548A7A5403ABA7602FEBAE%3AFG%3D1--1-1-0--2--wapp_1510665393192_464/sign?tbs=af62312bf49309c61510669752&fid=152744&kw=\" ba_cookie='把cookie放到这' headers={ 'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8', 'Accept-Encoding':'gzip, deflate', 'Accept-Language':'zh-CN,zh;q=0.8', 'Cookie':ba_cookie, 'Host':'tieba.baidu.com', 'Proxy-Connection':'keep-alive', 'Upgrade-Insecure-Requests':'1', 'User-Agent':'Mozilla/5.0", "as fob: ba_name_tuple=eval(fob.read()) def serun(): checkconfig() readconfig() go() if __name__==\"__main__\": schedule.every().day.at(\"00:10\").do(serun) #每天的签到时间 while", "def check(ba_name): content=s.get(url+ba_name,headers=headers).text return_list=rebuild.findall(content) if str(return_list)==\"['已签到']\": pass else: print (ba_name+\"-->Error\") def checkth(): for", "def run(ba_url,ba_name): qian_url=ba_url+ba_name s.get(qian_url,headers=headers) def go(): taske=[gevent.spawn(run,url,i) for i in ba_name_tuple] gevent.joinall(taske) rebuild=re.compile(r\"已签到\")", "taske=[gevent.spawn(run,url,i) for i in ba_name_tuple] gevent.joinall(taske) rebuild=re.compile(r\"已签到\") def check(ba_name): content=s.get(url+ba_name,headers=headers).text return_list=rebuild.findall(content) if str(return_list)==\"['已签到']\":", "Chrome/59.0.3071.115 Safari/537.36' } s=requests.Session() def run(ba_url,ba_name): qian_url=ba_url+ba_name s.get(qian_url,headers=headers) def go(): taske=[gevent.spawn(run,url,i) for i", "headers={ 
'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8', 'Accept-Encoding':'gzip, deflate', 'Accept-Language':'zh-CN,zh;q=0.8', 'Cookie':ba_cookie, 'Host':'tieba.baidu.com', 'Proxy-Connection':'keep-alive', 'Upgrade-Insecure-Requests':'1', 'User-Agent':'Mozilla/5.0 (Windows NT 10.0;", "ba_name_list=[] for i in retemp.findall(requests.get(ba_all_url,headers={'Cookie':ba_cookie}).text)[1:-2]: ba_name_list.append(i[1:-4]) with open(\"qd_config.ini\",\"w+\",encoding=\"utf-8\") as fob: fob.write(str(tuple(ba_name_list))) def checkconfig():", "in retemp.findall(requests.get(ba_all_url,headers={'Cookie':ba_cookie}).text)[1:-2]: ba_name_list.append(i[1:-4]) with open(\"qd_config.ini\",\"w+\",encoding=\"utf-8\") as fob: fob.write(str(tuple(ba_name_list))) def checkconfig(): if \"qd_config.ini\" in", "import requests,time,gevent,gevent.monkey,re,os from threading import Thread import schedule from pyquery import PyQuery as", "s=requests.Session() def run(ba_url,ba_name): qian_url=ba_url+ba_name s.get(qian_url,headers=headers) def go(): taske=[gevent.spawn(run,url,i) for i in ba_name_tuple] gevent.joinall(taske)", "(Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36' } s=requests.Session()", "s.get(qian_url,headers=headers) def go(): taske=[gevent.spawn(run,url,i) for i in ba_name_tuple] gevent.joinall(taske) rebuild=re.compile(r\"已签到\") def check(ba_name): content=s.get(url+ba_name,headers=headers).text", "requests,time,gevent,gevent.monkey,re,os from threading import Thread import schedule from pyquery import PyQuery as pq", "deflate', 'Accept-Language':'zh-CN,zh;q=0.8', 'Cookie':ba_cookie, 'Host':'tieba.baidu.com', 'Proxy-Connection':'keep-alive', 'Upgrade-Insecure-Requests':'1', 'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36", "import schedule from pyquery import PyQuery as pq gevent.monkey.patch_socket() url=\"http://tieba.baidu.com/mo/q---F55A5B1F58548A7A5403ABA7602FEBAE%3AFG%3D1--1-1-0--2--wapp_1510665393192_464/sign?tbs=af62312bf49309c61510669752&fid=152744&kw=\" ba_cookie='把cookie放到这' headers={ 'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',", "os.listdir(os.getcwd()): pass else: writeconfig() def readconfig(): global ba_name_tuple with open(\"qd_config.ini\",\"r\",encoding=\"utf-8\") as fob: ba_name_tuple=eval(fob.read())", "Gecko) Chrome/59.0.3071.115 Safari/537.36' } s=requests.Session() def run(ba_url,ba_name): qian_url=ba_url+ba_name s.get(qian_url,headers=headers) def go(): taske=[gevent.spawn(run,url,i) for", "ba_cookie='把cookie放到这' headers={ 'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8', 'Accept-Encoding':'gzip, deflate', 'Accept-Language':'zh-CN,zh;q=0.8', 'Cookie':ba_cookie, 'Host':'tieba.baidu.com', 'Proxy-Connection':'keep-alive', 'Upgrade-Insecure-Requests':'1', 'User-Agent':'Mozilla/5.0 (Windows NT", "Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36' } s=requests.Session() def run(ba_url,ba_name): qian_url=ba_url+ba_name", "gevent.joinall(taske) rebuild=re.compile(r\"已签到\") def check(ba_name): content=s.get(url+ba_name,headers=headers).text return_list=rebuild.findall(content) if str(return_list)==\"['已签到']\": pass else: print (ba_name+\"-->Error\") def", "str(return_list)==\"['已签到']\": pass else: print (ba_name+\"-->Error\") def checkth(): for g in ba_name_tuple: m=Thread(target=check,args=(g,)) m.start()", "pass else: 
writeconfig() def readconfig(): global ba_name_tuple with open(\"qd_config.ini\",\"r\",encoding=\"utf-8\") as fob: ba_name_tuple=eval(fob.read()) def", "fob.write(str(tuple(ba_name_list))) def checkconfig(): if \"qd_config.ini\" in os.listdir(os.getcwd()): pass else: writeconfig() def readconfig(): global", "'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8', 'Accept-Encoding':'gzip, deflate', 'Accept-Language':'zh-CN,zh;q=0.8', 'Cookie':ba_cookie, 'Host':'tieba.baidu.com', 'Proxy-Connection':'keep-alive', 'Upgrade-Insecure-Requests':'1', 'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64;", "def readconfig(): global ba_name_tuple with open(\"qd_config.ini\",\"r\",encoding=\"utf-8\") as fob: ba_name_tuple=eval(fob.read()) def serun(): checkconfig() readconfig()", "threading import Thread import schedule from pyquery import PyQuery as pq gevent.monkey.patch_socket() url=\"http://tieba.baidu.com/mo/q---F55A5B1F58548A7A5403ABA7602FEBAE%3AFG%3D1--1-1-0--2--wapp_1510665393192_464/sign?tbs=af62312bf49309c61510669752&fid=152744&kw=\"", "like Gecko) Chrome/59.0.3071.115 Safari/537.36' } s=requests.Session() def run(ba_url,ba_name): qian_url=ba_url+ba_name s.get(qian_url,headers=headers) def go(): taske=[gevent.spawn(run,url,i)", "else: print (ba_name+\"-->Error\") def checkth(): for g in ba_name_tuple: m=Thread(target=check,args=(g,)) m.start() def writeconfig():", "i in ba_name_tuple] gevent.joinall(taske) rebuild=re.compile(r\"已签到\") def check(ba_name): content=s.get(url+ba_name,headers=headers).text return_list=rebuild.findall(content) if str(return_list)==\"['已签到']\": pass else:", "g in ba_name_tuple: m=Thread(target=check,args=(g,)) m.start() def writeconfig(): temp=pq(requests.get(\"http://wapp.baidu.com/\",headers={'Cookie':ba_cookie}).content) ba_all_url=\"http://\"+str([i.attr(\"href\") for i in temp(\".my_love_bar\").children().items()][-1])[2:]", "temp=pq(requests.get(\"http://wapp.baidu.com/\",headers={'Cookie':ba_cookie}).content) ba_all_url=\"http://\"+str([i.attr(\"href\") for i in temp(\".my_love_bar\").children().items()][-1])[2:] retemp=re.compile(r\">\\w*</a>\") ba_name_list=[] for i in retemp.findall(requests.get(ba_all_url,headers={'Cookie':ba_cookie}).text)[1:-2]: ba_name_list.append(i[1:-4])", "import PyQuery as pq gevent.monkey.patch_socket() url=\"http://tieba.baidu.com/mo/q---F55A5B1F58548A7A5403ABA7602FEBAE%3AFG%3D1--1-1-0--2--wapp_1510665393192_464/sign?tbs=af62312bf49309c61510669752&fid=152744&kw=\" ba_cookie='把cookie放到这' headers={ 'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8', 'Accept-Encoding':'gzip, deflate', 'Accept-Language':'zh-CN,zh;q=0.8', 'Cookie':ba_cookie,", "temp(\".my_love_bar\").children().items()][-1])[2:] retemp=re.compile(r\">\\w*</a>\") ba_name_list=[] for i in retemp.findall(requests.get(ba_all_url,headers={'Cookie':ba_cookie}).text)[1:-2]: ba_name_list.append(i[1:-4]) with open(\"qd_config.ini\",\"w+\",encoding=\"utf-8\") as fob: fob.write(str(tuple(ba_name_list)))", "readconfig(): global ba_name_tuple with open(\"qd_config.ini\",\"r\",encoding=\"utf-8\") as fob: ba_name_tuple=eval(fob.read()) def serun(): checkconfig() readconfig() go()", "run(ba_url,ba_name): qian_url=ba_url+ba_name s.get(qian_url,headers=headers) def go(): taske=[gevent.spawn(run,url,i) for i in ba_name_tuple] gevent.joinall(taske) rebuild=re.compile(r\"已签到\") def", "else: writeconfig() def readconfig(): global ba_name_tuple with open(\"qd_config.ini\",\"r\",encoding=\"utf-8\") as fob: 
ba_name_tuple=eval(fob.read()) def serun():", "return_list=rebuild.findall(content) if str(return_list)==\"['已签到']\": pass else: print (ba_name+\"-->Error\") def checkth(): for g in ba_name_tuple:", "from threading import Thread import schedule from pyquery import PyQuery as pq gevent.monkey.patch_socket()", "for i in retemp.findall(requests.get(ba_all_url,headers={'Cookie':ba_cookie}).text)[1:-2]: ba_name_list.append(i[1:-4]) with open(\"qd_config.ini\",\"w+\",encoding=\"utf-8\") as fob: fob.write(str(tuple(ba_name_list))) def checkconfig(): if", "(KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36' } s=requests.Session() def run(ba_url,ba_name): qian_url=ba_url+ba_name s.get(qian_url,headers=headers) def go():", "\"qd_config.ini\" in os.listdir(os.getcwd()): pass else: writeconfig() def readconfig(): global ba_name_tuple with open(\"qd_config.ini\",\"r\",encoding=\"utf-8\") as", "rebuild=re.compile(r\"已签到\") def check(ba_name): content=s.get(url+ba_name,headers=headers).text return_list=rebuild.findall(content) if str(return_list)==\"['已签到']\": pass else: print (ba_name+\"-->Error\") def checkth():", "(ba_name+\"-->Error\") def checkth(): for g in ba_name_tuple: m=Thread(target=check,args=(g,)) m.start() def writeconfig(): temp=pq(requests.get(\"http://wapp.baidu.com/\",headers={'Cookie':ba_cookie}).content) ba_all_url=\"http://\"+str([i.attr(\"href\")", "ba_name_list.append(i[1:-4]) with open(\"qd_config.ini\",\"w+\",encoding=\"utf-8\") as fob: fob.write(str(tuple(ba_name_list))) def checkconfig(): if \"qd_config.ini\" in os.listdir(os.getcwd()): pass", "for g in ba_name_tuple: m=Thread(target=check,args=(g,)) m.start() def writeconfig(): temp=pq(requests.get(\"http://wapp.baidu.com/\",headers={'Cookie':ba_cookie}).content) ba_all_url=\"http://\"+str([i.attr(\"href\") for i in", "print (ba_name+\"-->Error\") def checkth(): for g in ba_name_tuple: m=Thread(target=check,args=(g,)) m.start() def writeconfig(): temp=pq(requests.get(\"http://wapp.baidu.com/\",headers={'Cookie':ba_cookie}).content)", "import Thread import schedule from pyquery import PyQuery as pq gevent.monkey.patch_socket() url=\"http://tieba.baidu.com/mo/q---F55A5B1F58548A7A5403ABA7602FEBAE%3AFG%3D1--1-1-0--2--wapp_1510665393192_464/sign?tbs=af62312bf49309c61510669752&fid=152744&kw=\" ba_cookie='把cookie放到这'", "for i in temp(\".my_love_bar\").children().items()][-1])[2:] retemp=re.compile(r\">\\w*</a>\") ba_name_list=[] for i in retemp.findall(requests.get(ba_all_url,headers={'Cookie':ba_cookie}).text)[1:-2]: ba_name_list.append(i[1:-4]) with open(\"qd_config.ini\",\"w+\",encoding=\"utf-8\")", "m.start() def writeconfig(): temp=pq(requests.get(\"http://wapp.baidu.com/\",headers={'Cookie':ba_cookie}).content) ba_all_url=\"http://\"+str([i.attr(\"href\") for i in temp(\".my_love_bar\").children().items()][-1])[2:] retemp=re.compile(r\">\\w*</a>\") ba_name_list=[] for i", "if \"qd_config.ini\" in os.listdir(os.getcwd()): pass else: writeconfig() def readconfig(): global ba_name_tuple with open(\"qd_config.ini\",\"r\",encoding=\"utf-8\")", "pass else: print (ba_name+\"-->Error\") def checkth(): for g in ba_name_tuple: m=Thread(target=check,args=(g,)) m.start() def", "go(): taske=[gevent.spawn(run,url,i) for i in ba_name_tuple] gevent.joinall(taske) rebuild=re.compile(r\"已签到\") def check(ba_name): content=s.get(url+ba_name,headers=headers).text return_list=rebuild.findall(content) if", "i in temp(\".my_love_bar\").children().items()][-1])[2:] retemp=re.compile(r\">\\w*</a>\") ba_name_list=[] for i in 
retemp.findall(requests.get(ba_all_url,headers={'Cookie':ba_cookie}).text)[1:-2]: ba_name_list.append(i[1:-4]) with open(\"qd_config.ini\",\"w+\",encoding=\"utf-8\") as", "open(\"qd_config.ini\",\"r\",encoding=\"utf-8\") as fob: ba_name_tuple=eval(fob.read()) def serun(): checkconfig() readconfig() go() if __name__==\"__main__\": schedule.every().day.at(\"00:10\").do(serun) #每天的签到时间", "'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36' }", "def checkconfig(): if \"qd_config.ini\" in os.listdir(os.getcwd()): pass else: writeconfig() def readconfig(): global ba_name_tuple", "checkconfig(): if \"qd_config.ini\" in os.listdir(os.getcwd()): pass else: writeconfig() def readconfig(): global ba_name_tuple with", "PyQuery as pq gevent.monkey.patch_socket() url=\"http://tieba.baidu.com/mo/q---F55A5B1F58548A7A5403ABA7602FEBAE%3AFG%3D1--1-1-0--2--wapp_1510665393192_464/sign?tbs=af62312bf49309c61510669752&fid=152744&kw=\" ba_cookie='把cookie放到这' headers={ 'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8', 'Accept-Encoding':'gzip, deflate', 'Accept-Language':'zh-CN,zh;q=0.8', 'Cookie':ba_cookie, 'Host':'tieba.baidu.com',", "ba_name_tuple: m=Thread(target=check,args=(g,)) m.start() def writeconfig(): temp=pq(requests.get(\"http://wapp.baidu.com/\",headers={'Cookie':ba_cookie}).content) ba_all_url=\"http://\"+str([i.attr(\"href\") for i in temp(\".my_love_bar\").children().items()][-1])[2:] retemp=re.compile(r\">\\w*</a>\") ba_name_list=[]", "def serun(): checkconfig() readconfig() go() if __name__==\"__main__\": schedule.every().day.at(\"00:10\").do(serun) #每天的签到时间 while 1: schedule.run_pending() time.sleep(1)", "writeconfig() def readconfig(): global ba_name_tuple with open(\"qd_config.ini\",\"r\",encoding=\"utf-8\") as fob: ba_name_tuple=eval(fob.read()) def serun(): checkconfig()", "retemp=re.compile(r\">\\w*</a>\") ba_name_list=[] for i in retemp.findall(requests.get(ba_all_url,headers={'Cookie':ba_cookie}).text)[1:-2]: ba_name_list.append(i[1:-4]) with open(\"qd_config.ini\",\"w+\",encoding=\"utf-8\") as fob: fob.write(str(tuple(ba_name_list))) def", "for i in ba_name_tuple] gevent.joinall(taske) rebuild=re.compile(r\"已签到\") def check(ba_name): content=s.get(url+ba_name,headers=headers).text return_list=rebuild.findall(content) if str(return_list)==\"['已签到']\": pass", "fob: fob.write(str(tuple(ba_name_list))) def checkconfig(): if \"qd_config.ini\" in os.listdir(os.getcwd()): pass else: writeconfig() def readconfig():", "in os.listdir(os.getcwd()): pass else: writeconfig() def readconfig(): global ba_name_tuple with open(\"qd_config.ini\",\"r\",encoding=\"utf-8\") as fob:", "as pq gevent.monkey.patch_socket() url=\"http://tieba.baidu.com/mo/q---F55A5B1F58548A7A5403ABA7602FEBAE%3AFG%3D1--1-1-0--2--wapp_1510665393192_464/sign?tbs=af62312bf49309c61510669752&fid=152744&kw=\" ba_cookie='把cookie放到这' headers={ 'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8', 'Accept-Encoding':'gzip, deflate', 'Accept-Language':'zh-CN,zh;q=0.8', 'Cookie':ba_cookie, 'Host':'tieba.baidu.com', 'Proxy-Connection':'keep-alive',", "'Upgrade-Insecure-Requests':'1', 'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36'", "Safari/537.36' } s=requests.Session() def run(ba_url,ba_name): qian_url=ba_url+ba_name s.get(qian_url,headers=headers) def go(): taske=[gevent.spawn(run,url,i) for 
i in", "} s=requests.Session() def run(ba_url,ba_name): qian_url=ba_url+ba_name s.get(qian_url,headers=headers) def go(): taske=[gevent.spawn(run,url,i) for i in ba_name_tuple]", "m=Thread(target=check,args=(g,)) m.start() def writeconfig(): temp=pq(requests.get(\"http://wapp.baidu.com/\",headers={'Cookie':ba_cookie}).content) ba_all_url=\"http://\"+str([i.attr(\"href\") for i in temp(\".my_love_bar\").children().items()][-1])[2:] retemp=re.compile(r\">\\w*</a>\") ba_name_list=[] for", "def writeconfig(): temp=pq(requests.get(\"http://wapp.baidu.com/\",headers={'Cookie':ba_cookie}).content) ba_all_url=\"http://\"+str([i.attr(\"href\") for i in temp(\".my_love_bar\").children().items()][-1])[2:] retemp=re.compile(r\">\\w*</a>\") ba_name_list=[] for i in", "with open(\"qd_config.ini\",\"r\",encoding=\"utf-8\") as fob: ba_name_tuple=eval(fob.read()) def serun(): checkconfig() readconfig() go() if __name__==\"__main__\": schedule.every().day.at(\"00:10\").do(serun)", "from pyquery import PyQuery as pq gevent.monkey.patch_socket() url=\"http://tieba.baidu.com/mo/q---F55A5B1F58548A7A5403ABA7602FEBAE%3AFG%3D1--1-1-0--2--wapp_1510665393192_464/sign?tbs=af62312bf49309c61510669752&fid=152744&kw=\" ba_cookie='把cookie放到这' headers={ 'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8', 'Accept-Encoding':'gzip, deflate',", "i in retemp.findall(requests.get(ba_all_url,headers={'Cookie':ba_cookie}).text)[1:-2]: ba_name_list.append(i[1:-4]) with open(\"qd_config.ini\",\"w+\",encoding=\"utf-8\") as fob: fob.write(str(tuple(ba_name_list))) def checkconfig(): if \"qd_config.ini\"", "ba_name_tuple] gevent.joinall(taske) rebuild=re.compile(r\"已签到\") def check(ba_name): content=s.get(url+ba_name,headers=headers).text return_list=rebuild.findall(content) if str(return_list)==\"['已签到']\": pass else: print (ba_name+\"-->Error\")", "'Cookie':ba_cookie, 'Host':'tieba.baidu.com', 'Proxy-Connection':'keep-alive', 'Upgrade-Insecure-Requests':'1', 'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like", "def go(): taske=[gevent.spawn(run,url,i) for i in ba_name_tuple] gevent.joinall(taske) rebuild=re.compile(r\"已签到\") def check(ba_name): content=s.get(url+ba_name,headers=headers).text return_list=rebuild.findall(content)", "NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36' } s=requests.Session() def", "AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36' } s=requests.Session() def run(ba_url,ba_name): qian_url=ba_url+ba_name s.get(qian_url,headers=headers) def", "pq gevent.monkey.patch_socket() url=\"http://tieba.baidu.com/mo/q---F55A5B1F58548A7A5403ABA7602FEBAE%3AFG%3D1--1-1-0--2--wapp_1510665393192_464/sign?tbs=af62312bf49309c61510669752&fid=152744&kw=\" ba_cookie='把cookie放到这' headers={ 'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8', 'Accept-Encoding':'gzip, deflate', 'Accept-Language':'zh-CN,zh;q=0.8', 'Cookie':ba_cookie, 'Host':'tieba.baidu.com', 'Proxy-Connection':'keep-alive', 'Upgrade-Insecure-Requests':'1',", "content=s.get(url+ba_name,headers=headers).text return_list=rebuild.findall(content) if str(return_list)==\"['已签到']\": pass else: print (ba_name+\"-->Error\") def checkth(): for g in", "fob: ba_name_tuple=eval(fob.read()) def serun(): checkconfig() readconfig() go() if __name__==\"__main__\": schedule.every().day.at(\"00:10\").do(serun) #每天的签到时间 while 1:", 
"url=\"http://tieba.baidu.com/mo/q---F55A5B1F58548A7A5403ABA7602FEBAE%3AFG%3D1--1-1-0--2--wapp_1510665393192_464/sign?tbs=af62312bf49309c61510669752&fid=152744&kw=\" ba_cookie='把cookie放到这' headers={ 'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8', 'Accept-Encoding':'gzip, deflate', 'Accept-Language':'zh-CN,zh;q=0.8', 'Cookie':ba_cookie, 'Host':'tieba.baidu.com', 'Proxy-Connection':'keep-alive', 'Upgrade-Insecure-Requests':'1', 'User-Agent':'Mozilla/5.0 (Windows", "10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36' } s=requests.Session() def run(ba_url,ba_name):", "checkth(): for g in ba_name_tuple: m=Thread(target=check,args=(g,)) m.start() def writeconfig(): temp=pq(requests.get(\"http://wapp.baidu.com/\",headers={'Cookie':ba_cookie}).content) ba_all_url=\"http://\"+str([i.attr(\"href\") for i", "def checkth(): for g in ba_name_tuple: m=Thread(target=check,args=(g,)) m.start() def writeconfig(): temp=pq(requests.get(\"http://wapp.baidu.com/\",headers={'Cookie':ba_cookie}).content) ba_all_url=\"http://\"+str([i.attr(\"href\") for" ]
[ "import setuptools setuptools.setup( name=\"dreamerv2\", version=\"1.0.0\", description=( \"Mastering Atari with Discrete World Models\" ),", "<reponame>magamba/dreamerv2 import setuptools setuptools.setup( name=\"dreamerv2\", version=\"1.0.0\", description=( \"Mastering Atari with Discrete World Models\"", "name=\"dreamerv2\", version=\"1.0.0\", description=( \"Mastering Atari with Discrete World Models\" ), license=\"MIT License\", url=\"https://github.com/RajGhugare19/dreamerv2\",", "setuptools.setup( name=\"dreamerv2\", version=\"1.0.0\", description=( \"Mastering Atari with Discrete World Models\" ), license=\"MIT License\",", "version=\"1.0.0\", description=( \"Mastering Atari with Discrete World Models\" ), license=\"MIT License\", url=\"https://github.com/RajGhugare19/dreamerv2\", packages=setuptools.find_packages(),", "setuptools setuptools.setup( name=\"dreamerv2\", version=\"1.0.0\", description=( \"Mastering Atari with Discrete World Models\" ), license=\"MIT", "description=( \"Mastering Atari with Discrete World Models\" ), license=\"MIT License\", url=\"https://github.com/RajGhugare19/dreamerv2\", packages=setuptools.find_packages(), )" ]
[ "== 's3': endpoint_url = os.environ.get('S3_ENDPOINT_URL') if not endpoint_url: return True if url_util.parse(endpoint_url, scheme='https').scheme", "if the root can't be fetched; it ignores errors with pages that the", "key = None while True: contents, key = _list_s3_objects( client, url, num_entries, start_after=key)", "None, None return response.geturl(), response.headers, response def warn_no_ssl_cert_checking(): tty.warn(\"Spack will not check SSL", "a tuple of: - pages: dict of pages visited (URL) mapped to their", "the resulting item is undefined. The new dict is returned if at least", "'ast', sep, M, 'odified')) for L, sep, M in product('Ll', _separators, 'Mm')), \"Server\":", "as described above are returned unaltered. For example: The standard spelling of \"Content-length\"", "if at least one of its elements differ from their corrsponding element in", "llnl.util.filesystem import mkdirp import llnl.util.tty as tty import spack.cmd import spack.config import spack.error", "HTML.\" # Pre-2.7.3 Pythons in particular have rather prickly HTML parsing. if sys.version_info[:3]", "from page URL -> text content. links = set() # set of all", "can use this to extract things # on a page that look like", "is considered, where the key in each item is the key of its", "in ('http', 'https') if accept_content_type and is_web_url: # Make a HEAD request first", "_, response = read_from_url(url, 'text/html') if not response_url or not response: return pages,", "element is the standardized spelling for headers[0]. If headers is a sequence, then", "kwargs: del kwargs['context'] opener = urlopen if url_util.parse(url).scheme == 's3': import spack.s3_handler opener", "response.geturl(), response.headers, response def warn_no_ssl_cert_checking(): tty.warn(\"Spack will not check SSL certificates. You need", "underscore (_), or space ( ). Header names that cannot be mapped as", "import codecs import errno import re import os import os.path import shutil import", "only if the root can't be fetched; it ignores errors with pages that", "except AttributeError: pass # We don't pass 'context' parameter because it was only", "cannot be mapped as described above are returned unaltered. For example: The standard", "depth=list_depth) pages.update(pg) links.update(lnk) # Scrape them for archive URLs regexes = [] for", "differs from it, then a new tuple is returned. This tuple has the", "ExtraArgs=extra_args) if not keep_original: os.remove(local_file_path) else: raise NotImplementedError( 'Unrecognized URL scheme: {SCHEME}'.format( SCHEME=remote_url.scheme))", "_iter_s3_prefix(client, url, num_entries=1024): key = None while True: contents, key = _list_s3_objects( client,", "= ssl.create_default_context() else: # User has explicitly indicated that they do not want", "). Header names that cannot be mapped as described above are returned unaltered.", "This new list is returned if at least one of its elements differ", "more liberal and just look for the archive # part, not the full", "web requests _timeout = 10 # See docstring for standardize_header_names() _separators = ('',", "from the root. Prints out a warning only if the root can't be", "Length ... and any other header name, such as \"Content-encoding\", would not be", "string or tuple. The value is taken from the corresponding item. If the", "if headers == standardized_spelling: return headers return standardized_spelling return headers if isinstance(headers, tuple):", "on a page that look like archive URLs. 
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

from __future__ import print_function

import codecs
import errno
import re
import os
import os.path
import shutil
import ssl
import sys
import traceback

from itertools import product

import six
from six.moves.urllib.request import urlopen, Request
from six.moves.urllib.error import URLError
import multiprocessing.pool

try:
    # Python 2 had these in the HTMLParser package.
    from HTMLParser import HTMLParser, HTMLParseError
except ImportError:
    # In Python 3, things moved to html.parser
    from html.parser import HTMLParser

    # Also, HTMLParseError is deprecated and never raised.
    class HTMLParseError(Exception):
        pass

from llnl.util.filesystem import mkdirp
import llnl.util.tty as tty

import spack.cmd
import spack.config
import spack.error
import spack.url
import spack.util.crypto
import spack.util.s3 as s3_util
import spack.util.url as url_util
from spack.util.compression import ALLOWED_ARCHIVE_TYPES

# Timeout in seconds for web requests
_timeout = 10

# See docstring for standardize_header_names()
_separators = ('', ' ', '_', '-')

HTTP_HEADER_NAME_ALIASES = {
    "Accept-ranges": set(
        ''.join((A, 'ccept', sep, R, 'anges'))
        for A, sep, R in product('Aa', _separators, 'Rr')),
    "Content-length": set(
        ''.join((C, 'ontent', sep, L, 'ength'))
        for C, sep, L in product('Cc', _separators, 'Ll')),
    "Content-type": set(
        ''.join((C, 'ontent', sep, T, 'ype'))
        for C, sep, T in product('Cc', _separators, 'Tt')),
    "Date": set(('Date', 'date')),
    "Last-modified": set(
        ''.join((L, 'ast', sep, M, 'odified'))
        for L, sep, M in product('Ll', _separators, 'Mm')),
    "Server": set(('Server', 'server'))
}


class LinkParser(HTMLParser):
    """This parser just takes an HTML page and strips out the hrefs on the
       links.  Good enough for a really simple spider."""

    def __init__(self):
        HTMLParser.__init__(self)
        self.links = []

    def handle_starttag(self, tag, attrs):
        if tag == 'a':
            for attr, val in attrs:
                if attr == 'href':
                    self.links.append(val)


class NonDaemonProcess(multiprocessing.Process):
    """Process that allows sub-processes, so pools can have sub-pools."""
    @property
    def daemon(self):
        return False

    @daemon.setter
    def daemon(self, value):
        pass


if sys.version_info[0] < 3:
    class NonDaemonPool(multiprocessing.pool.Pool):
        """Pool that uses non-daemon processes"""
        Process = NonDaemonProcess
else:
    class NonDaemonContext(type(multiprocessing.get_context())):
        Process = NonDaemonProcess

    class NonDaemonPool(multiprocessing.pool.Pool):
        """Pool that uses non-daemon processes"""
        def __init__(self, *args, **kwargs):
            kwargs['context'] = NonDaemonContext()
            super(NonDaemonPool, self).__init__(*args, **kwargs)


def uses_ssl(parsed_url):
    if parsed_url.scheme == 'https':
        return True

    if parsed_url.scheme == 's3':
        endpoint_url = os.environ.get('S3_ENDPOINT_URL')
        if not endpoint_url:
            return True

        if url_util.parse(endpoint_url, scheme='https').scheme == 'https':
            return True

    return False


__UNABLE_TO_VERIFY_SSL = (
    lambda pyver: (
        (pyver < (2, 7, 9)) or
        ((3,) < pyver < (3, 4, 3))
    ))(sys.version_info)
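# Example (illustrative sketch, not part of the original module): feeding a
# small HTML snippet through LinkParser to collect href targets, the same way
# _spider() below uses it on fetched pages.  The function name and the HTML
# string are made up for demonstration purposes.
def _demo_link_parser():
    html = ('<html><body>'
            '<a href="foo-1.0.tar.gz">foo</a>'
            '<a href="subdir/">subdir</a>'
            '</body></html>')
    parser = LinkParser()
    parser.feed(html)
    return parser.links  # -> ['foo-1.0.tar.gz', 'subdir/']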
def read_from_url(url, accept_content_type=None):
    url = url_util.parse(url)
    context = None

    verify_ssl = spack.config.get('config:verify_ssl')

    # Don't even bother with a context unless the URL scheme is one that uses
    # SSL certs.
    if uses_ssl(url):
        if verify_ssl:
            if __UNABLE_TO_VERIFY_SSL:
                # User wants SSL verification, but it cannot be provided.
                warn_no_ssl_cert_checking()
            else:
                # User wants SSL verification, and it *can* be provided.
                context = ssl.create_default_context()
        else:
            # User has explicitly indicated that they do not want SSL
            # verification.
            context = ssl._create_unverified_context()

    req = Request(url_util.format(url))
    content_type = None
    is_web_url = url.scheme in ('http', 'https')
    if accept_content_type and is_web_url:
        # Make a HEAD request first to check the content type.  This lets
        # us ignore tarballs and gigantic files.
        # It would be nice to do this with the HTTP Accept header to avoid
        # one round-trip.  However, most servers seem to ignore the header
        # if you ask for a tarball with Accept: text/html.
        req.get_method = lambda: "HEAD"
        resp = _urlopen(req, timeout=_timeout, context=context)

        content_type = resp.headers.get('Content-type')

    # Do the real GET request when we know it's just HTML.
    req.get_method = lambda: "GET"
    response = _urlopen(req, timeout=_timeout, context=context)

    if accept_content_type and not is_web_url:
        content_type = response.headers.get('Content-type')

    reject_content_type = (
        accept_content_type and (
            content_type is None or
            not content_type.startswith(accept_content_type)))

    if reject_content_type:
        tty.debug("ignoring page {0}{1}{2}".format(
            url_util.format(url),
            " with content type " if content_type is not None else "",
            content_type or ""))

        return None, None, None

    return response.geturl(), response.headers, response


def warn_no_ssl_cert_checking():
    tty.warn("Spack will not check SSL certificates. You need to update "
             "your Python to enable certificate verification.")


def push_to_url(local_file_path, remote_path, **kwargs):
    keep_original = kwargs.get('keep_original', True)

    remote_url = url_util.parse(remote_path)
    verify_ssl = spack.config.get('config:verify_ssl')

    if __UNABLE_TO_VERIFY_SSL and verify_ssl and uses_ssl(remote_url):
        warn_no_ssl_cert_checking()

    remote_file_path = url_util.local_file_path(remote_url)
    if remote_file_path is not None:
        mkdirp(os.path.dirname(remote_file_path))
        if keep_original:
            shutil.copy(local_file_path, remote_file_path)
        else:
            try:
                os.rename(local_file_path, remote_file_path)
            except OSError as e:
                if e.errno == errno.EXDEV:
                    # NOTE(opadron): The above move failed because it crosses
                    # filesystem boundaries.  Copy the file (plus original
                    # metadata), and then delete the original.  This operation
                    # needs to be done in separate steps.
                    shutil.copy2(local_file_path, remote_file_path)
                    os.remove(local_file_path)

    elif remote_url.scheme == 's3':
        extra_args = kwargs.get('extra_args', {})

        remote_path = remote_url.path
        while remote_path.startswith('/'):
            remote_path = remote_path[1:]

        s3 = s3_util.create_s3_session(remote_url)
        s3.upload_file(local_file_path, remote_url.netloc,
                       remote_path, ExtraArgs=extra_args)

        if not keep_original:
            os.remove(local_file_path)

    else:
        raise NotImplementedError(
            'Unrecognized URL scheme: {SCHEME}'.format(
                SCHEME=remote_url.scheme))
def url_exists(url):
    url = url_util.parse(url)
    local_path = url_util.local_file_path(url)
    if local_path:
        return os.path.exists(local_path)

    if url.scheme == 's3':
        s3 = s3_util.create_s3_session(url)
        from botocore.exceptions import ClientError
        try:
            s3.get_object(Bucket=url.netloc, Key=url.path)
            return True
        except ClientError as err:
            if err.response['Error']['Code'] == 'NoSuchKey':
                return False
            raise err

    # otherwise, just try to "read" from the URL, and assume that *any*
    # non-throwing response contains the resource represented by the URL
    try:
        read_from_url(url)
        return True
    except URLError:
        return False


def remove_url(url):
    url = url_util.parse(url)

    local_path = url_util.local_file_path(url)
    if local_path:
        os.remove(local_path)
        return

    if url.scheme == 's3':
        s3 = s3_util.create_s3_session(url)
        s3.delete_object(Bucket=url.s3_bucket, Key=url.path)
        return

    # Don't even try for other URL schemes.


def _list_s3_objects(client, url, num_entries, start_after=None):
    list_args = dict(
        Bucket=url.netloc,
        Prefix=url.path,
        MaxKeys=num_entries)

    if start_after is not None:
        list_args['StartAfter'] = start_after

    result = client.list_objects_v2(**list_args)

    last_key = None
    if result['IsTruncated']:
        last_key = result['Contents'][-1]['Key']

    iter = (key for key in
            (
                os.path.relpath(entry['Key'], url.path)
                for entry in result['Contents']
            )
            if key != '.')

    return iter, last_key


def _iter_s3_prefix(client, url, num_entries=1024):
    key = None
    while True:
        contents, key = _list_s3_objects(
            client, url, num_entries, start_after=key)

        for x in contents:
            yield x

        if not key:
            break


def list_url(url):
    url = url_util.parse(url)

    local_path = url_util.local_file_path(url)
    if local_path:
        return os.listdir(local_path)

    if url.scheme == 's3':
        s3 = s3_util.create_s3_session(url)
        return list(set(
            key.split('/', 1)[0]
            for key in _iter_s3_prefix(s3, url)))
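# Example (illustrative sketch): the same StartAfter-based pagination that
# _list_s3_objects()/_iter_s3_prefix() implement, written directly against a
# boto3 client.  boto3 availability, the bucket name, and the prefix are
# assumptions; Spack itself goes through s3_util.create_s3_session().
def _demo_iter_bucket_keys():
    import boto3
    client = boto3.client('s3')
    start_after = ''
    while True:
        result = client.list_objects_v2(
            Bucket='my-bucket', Prefix='my/prefix/', MaxKeys=1024,
            StartAfter=start_after)
        for entry in result.get('Contents', []):
            yield entry['Key']
        if not result.get('IsTruncated'):
            break
        start_after = result['Contents'][-1]['Key']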
def _spider(url, visited, root, depth, max_depth, raise_on_error):
    """Fetches URL and any pages it links to up to max_depth.

       depth should initially be zero, and max_depth is the max depth of
       links to follow from the root.

       Prints out a warning only if the root can't be fetched; it ignores
       errors with pages that the root links to.

       Returns a tuple of:
       - pages: dict of pages visited (URL) mapped to their full text.
       - links: set of links encountered while visiting the pages.
    """
    pages = {}     # dict from page URL -> text content.
    links = set()  # set of all links seen on visited pages.

    try:
        response_url, _, response = read_from_url(url, 'text/html')
        if not response_url or not response:
            return pages, links

        page = codecs.getreader('utf-8')(response).read()
        pages[response_url] = page

        # Parse out the links in the page
        link_parser = LinkParser()
        subcalls = []
        link_parser.feed(page)

        while link_parser.links:
            raw_link = link_parser.links.pop()
            abs_link = url_util.join(
                response_url, raw_link.strip(), resolve_href=True)
            links.add(abs_link)

            # Skip stuff that looks like an archive
            if any(raw_link.endswith(suf) for suf in ALLOWED_ARCHIVE_TYPES):
                continue

            # Skip things outside the root directory
            if not abs_link.startswith(root):
                continue

            # Skip already-visited links
            if abs_link in visited:
                continue

            # If we're not at max depth, follow links.
            if depth < max_depth:
                subcalls.append((abs_link, visited, root,
                                 depth + 1, max_depth, raise_on_error))
                visited.add(abs_link)

        if subcalls:
            pool = NonDaemonPool(processes=len(subcalls))
            try:
                results = pool.map(_spider_wrapper, subcalls)

                for sub_pages, sub_links in results:
                    pages.update(sub_pages)
                    links.update(sub_links)

            finally:
                pool.terminate()
                pool.join()

    except URLError as e:
        tty.debug(e)

        if hasattr(e, 'reason') and isinstance(e.reason, ssl.SSLError):
            tty.warn("Spack was unable to fetch url list due to a certificate "
                     "verification problem. You can try running spack -k, "
                     "which will not check SSL certificates. Use this at your "
                     "own risk.")

        if raise_on_error:
            raise NoNetworkConnectionError(str(e), url)

    except HTMLParseError as e:
        # This error indicates that Python's HTML parser sucks.
        msg = "Got an error parsing HTML."

        # Pre-2.7.3 Pythons in particular have rather prickly HTML parsing.
        if sys.version_info[:3] < (2, 7, 3):
            msg += " Use Python 2.7.3 or newer for better HTML parsing."

        tty.warn(msg, url, "HTMLParseError: " + str(e))

    except Exception as e:
        # Other types of errors are completely ignored, except in debug mode.
        tty.debug("Error in _spider: %s:%s" % (type(e), e),
                  traceback.format_exc())

    return pages, links


def _spider_wrapper(args):
    """Wrapper for using spider with multiprocessing."""
    return _spider(*args)


def _urlopen(req, *args, **kwargs):
    """Wrapper for compatibility with old versions of Python."""
    url = req
    try:
        url = url.get_full_url()
    except AttributeError:
        pass

    # We don't pass 'context' parameter because it was only introduced starting
    # with versions 2.7.9 and 3.4.3 of Python.
    if 'context' in kwargs:
        del kwargs['context']

    opener = urlopen
    if url_util.parse(url).scheme == 's3':
        import spack.s3_handler
        opener = spack.s3_handler.open

    return opener(req, *args, **kwargs)


def spider(root, depth=0):
    """Gets web pages from a root URL.

       If depth is specified (e.g., depth=2), then this will also follow up
       to <depth> levels of links from the root.

       This will spawn processes to fetch the children, for much improved
       performance over a sequential fetch.
    """
    root = url_util.parse(root)
    pages, links = _spider(root, set(), root, 0, depth, False)
    return pages, links
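# Example (illustrative sketch): crawling one level of links from a listing
# page and printing the pages that were fetched.  The URL is a placeholder.
def _demo_spider_listing():
    pages, links = spider('https://example.com/downloads/', depth=1)
    for page_url in sorted(pages):
        print(page_url)
    return links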
def find_versions_of_archive(archive_urls, list_url=None, list_depth=0):
    """Scrape web pages for new versions of a tarball.

    Arguments:
        archive_urls (str or list or tuple): URL or sequence of URLs for
            different versions of a package. Typically these are just the
            tarballs from the package file itself. By default, this searches
            the parent directories of archives.

    Keyword Arguments:
        list_url (str or None): URL for a listing of archives.
            Spack will scrape these pages for download links that look
            like the archive URL.

        list_depth (int): Max depth to follow links on list_url pages.
            Defaults to 0.
    """
    if not isinstance(archive_urls, (list, tuple)):
        archive_urls = [archive_urls]

    # Generate a list of list_urls based on archive urls and any
    # explicitly listed list_url in the package
    list_urls = set()
    if list_url is not None:
        list_urls.add(list_url)
    for aurl in archive_urls:
        list_urls |= spack.url.find_list_urls(aurl)

    # Add '/' to the end of the URL. Some web servers require this.
    additional_list_urls = set()
    for lurl in list_urls:
        if not lurl.endswith('/'):
            additional_list_urls.add(lurl + '/')
    list_urls |= additional_list_urls

    # Grab some web pages to scrape.
    pages = {}
    links = set()
    for lurl in list_urls:
        pg, lnk = spider(lurl, depth=list_depth)
        pages.update(pg)
        links.update(lnk)

    # Scrape them for archive URLs
    regexes = []
    for aurl in archive_urls:
        # This creates a regex from the URL with a capture group for
        # the version part of the URL.  The capture group is converted
        # to a generic wildcard, so we can use this to extract things
        # on a page that look like archive URLs.
        url_regex = spack.url.wildcard_version(aurl)

        # We'll be a bit more liberal and just look for the archive
        # part, not the full path.
        url_regex = os.path.basename(url_regex)

        # We need to add a / to the beginning of the regex to prevent
        # Spack from picking up similarly named packages like:
        #   https://cran.r-project.org/src/contrib/pls_2.6-0.tar.gz
        #   https://cran.r-project.org/src/contrib/enpls_5.7.tar.gz
        #   https://cran.r-project.org/src/contrib/autopls_1.3.tar.gz
        #   https://cran.r-project.org/src/contrib/matrixpls_1.0.4.tar.gz
        url_regex = '/' + url_regex

        # We need to add a $ anchor to the end of the regex to prevent
        # Spack from picking up signature files like:
        #   .asc
        #   .md5
        #   .sha256
        #   .sig
        # However, SourceForge downloads still need to end in '/download'.
        url_regex += r'(\/download)?$'

        regexes.append(url_regex)

    # Build a dict version -> URL from any links that match the wildcards.
    # Walk through archive_url links first.
    # Any conflicting versions will be overwritten by the list_url links.
    versions = {}
    for url in archive_urls + sorted(links):
        if any(re.search(r, url) for r in regexes):
            try:
                ver = spack.url.parse_version(url)
                versions[ver] = url
            except spack.url.UndetectableVersionError:
                continue

    return versions
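# Example (illustrative sketch): asking for known versions of a package by
# scraping the parent directory of one of its archives.  The URL is a
# placeholder, and a working Spack installation is assumed.
def _demo_find_versions():
    versions = find_versions_of_archive(
        'https://example.com/downloads/foo-1.0.tar.gz', list_depth=0)
    for ver in sorted(versions):
        print(ver, versions[ver])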
if depth < max_depth: subcalls.append((abs_link, visited, root, depth", "list_args['StartAfter'] = start_after result = client.list_objects_v2(**list_args) last_key = None if result['IsTruncated']: last_key =", "shutil.copy(local_file_path, remote_file_path) else: try: os.rename(local_file_path, remote_file_path) except OSError as e: if e.errno ==", "a regex from the URL with a capture group for # the version", "link_parser.links: raw_link = link_parser.links.pop() abs_link = url_util.join( response_url, raw_link.strip(), resolve_href=True) links.add(abs_link) # Skip", "or, if separated, whether they are so by a dash (-), underscore (_),", "return os.listdir(local_path) if url.scheme == 's3': s3 = s3_util.create_s3_session(url) return list(set( key.split('/', 1)[0]", "return headers if isinstance(headers, tuple): if not headers: return headers old = headers[0]", "unless the URL scheme is one that uses # SSL certs. if uses_ssl(url):", "the URL, and assume that *any* # non-throwing response contains the resource represented", "[] link_parser.feed(page) while link_parser.links: raw_link = link_parser.links.pop() abs_link = url_util.join( response_url, raw_link.strip(), resolve_href=True)", "# Spack from picking up signature files like: # .asc # .md5 #", "False raise err # otherwise, just try to \"read\" from the URL, and", "versions 2.7.9 and 3.4.3 of Python. if 'context' in kwargs: del kwargs['context'] opener", "product('Ll', _separators, 'Mm')), \"Server\": set(('Server', 'server')) } class LinkParser(HTMLParser): \"\"\"This parser just takes", "daemon(self): return False @daemon.setter def daemon(self, value): pass if sys.version_info[0] < 3: class", "an archive if any(raw_link.endswith(suf) for suf in ALLOWED_ARCHIVE_TYPES): continue # Skip things outside", "OR MIT) from __future__ import print_function import codecs import errno import re import", "if err.response['Error']['Code'] == 'NoSuchKey': return False raise err # otherwise, just try to", "# the version part of the URL. The capture group is converted #", "know it's just HTML. req.get_method = lambda: \"GET\" response = _urlopen(req, timeout=_timeout, context=context)", "wildcard, so we can use this to extract things # on a page", "its corresponding element in headers, but mapped as above if a string or", "sep, M in product('Ll', _separators, 'Mm')), \"Server\": set(('Server', 'server')) } class LinkParser(HTMLParser): \"\"\"This", "= url except spack.url.UndetectableVersionError: continue return versions def standardize_header_names(headers): \"\"\"Replace certain header names", "# explicitly listed list_url in the package list_urls = set() if list_url is", "of multiple items in headers map to the same key after being standardized.", "value): pass if sys.version_info[0] < 3: class NonDaemonPool(multiprocessing.pool.Pool): \"\"\"Pool that uses non-daemon processes\"\"\"", "metadata), and then delete the original. This operation # needs to be done", "if not keep_original: os.remove(local_file_path) else: raise NotImplementedError( 'Unrecognized URL scheme: {SCHEME}'.format( SCHEME=remote_url.scheme)) def", "from __future__ import print_function import codecs import errno import re import os import", "its items has a key that differs from that of their corresponding item", "sep, L in product('Cc', _separators, 'Ll')), \"Content-type\": set( ''.join((C, 'ontent', sep, T, 'ype'))", "Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the", "of pages visited (URL) mapped to their full text. 
- links: set of", "wants SSL verification, but it cannot be provided. warn_no_ssl_cert_checking() else: # User wants", "from llnl.util.filesystem import mkdirp import llnl.util.tty as tty import spack.cmd import spack.config import", "HTML parsing.\" tty.warn(msg, url, \"HTMLParseError: \" + str(e)) except Exception as e: #", "if isinstance(old, six.string_types): new = standardize_header_names(old) if old is not new: return (new,)", "not is_web_url: content_type = response.headers.get('Content-type') reject_content_type = ( accept_content_type and ( content_type is", "LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for", "'text/html') if not response_url or not response: return pages, links page = codecs.getreader('utf-8')(response).read()", "in attrs: if attr == 'href': self.links.append(val) class NonDaemonProcess(multiprocessing.Process): \"\"\"Process that allows sub-processes,", "not abs_link.startswith(root): continue # Skip already-visited links if abs_link in visited: continue #", "_urlopen(req, timeout=_timeout, context=context) if accept_content_type and not is_web_url: content_type = response.headers.get('Content-type') reject_content_type =", "The standard spelling of \"Content-length\" would be substituted for any of the following", "(2, 7, 3): msg += \" Use Python 2.7.3 or newer for better", "is returned if at least one of its elements differ from their corrsponding", "specified (e.g., depth=2), then this will also follow up to <depth> levels of", "the following header names: - Accept-ranges - Content-length - Content-type - Date -", "try: changed = False new_dict = {} for key, value in headers.items(): if", "_separators = ('', ' ', '_', '-') HTTP_HEADER_NAME_ALIASES = { \"Accept-ranges\": set( ''.join((A,", "wildcards. # Walk through archive_url links first. # Any conflicting versions will be", "\"\"\"Gets web pages from a root URL. If depth is specified (e.g., depth=2),", "to the same key after being standardized. In all other cases headers is", "by the URL try: read_from_url(url) return True except URLError: return False def remove_url(url):", "= url_util.parse(remote_path) verify_ssl = spack.config.get('config:verify_ssl') if __UNABLE_TO_VERIFY_SSL and verify_ssl and uses_ssl(remote_url): warn_no_ssl_cert_checking() remote_file_path", "kwargs.get('keep_original', True) remote_url = url_util.parse(remote_path) verify_ssl = spack.config.get('config:verify_ssl') if __UNABLE_TO_VERIFY_SSL and verify_ssl and", "key, standardize_header_names(key) changed = changed or key is not old_key new_dict[key] = value", "if accept_content_type and is_web_url: # Make a HEAD request first to check the", "HTML parser sucks. msg = \"Got an error parsing HTML.\" # Pre-2.7.3 Pythons", "else \"\", content_type or \"\")) return None, None, None return response.geturl(), response.headers, response", "on visited pages. try: response_url, _, response = read_from_url(url, 'text/html') if not response_url", "in headers. 
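
# Illustrative sketch (not required by the module): LinkParser collects the
# href of every <a> tag fed to it, which is all the spider below needs. The
# HTML snippet here is made up for illustration.
def _example_extract_links():
    parser = LinkParser()
    parser.feed('<html><body>'
                '<a href="foo-1.0.tar.gz">foo 1.0</a> '
                '<a href="subdir/">subdir</a>'
                '</body></html>')
    return parser.links  # ['foo-1.0.tar.gz', 'subdir/']
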
def uses_ssl(parsed_url):
    if parsed_url.scheme == 'https':
        return True

    if parsed_url.scheme == 's3':
        endpoint_url = os.environ.get('S3_ENDPOINT_URL')
        if not endpoint_url:
            return True

        if url_util.parse(endpoint_url, scheme='https').scheme == 'https':
            return True

    return False


__UNABLE_TO_VERIFY_SSL = (
    lambda pyver: (
        (pyver < (2, 7, 9)) or
        ((3,) < pyver < (3, 4, 3))
    ))(sys.version_info)


def read_from_url(url, accept_content_type=None):
    url = url_util.parse(url)
    context = None

    verify_ssl = spack.config.get('config:verify_ssl')

    # Don't even bother with a context unless the URL scheme is one that uses
    # SSL certs.
    if uses_ssl(url):
        if verify_ssl:
            if __UNABLE_TO_VERIFY_SSL:
                # User wants SSL verification, but it cannot be provided.
                warn_no_ssl_cert_checking()
            else:
                # User wants SSL verification, and it *can* be provided.
                context = ssl.create_default_context()
        else:
            # User has explicitly indicated that they do not want SSL
            # verification.
            context = ssl._create_unverified_context()

    req = Request(url_util.format(url))
    content_type = None
    is_web_url = url.scheme in ('http', 'https')
    if accept_content_type and is_web_url:
        # Make a HEAD request first to check the content type.  This lets
        # us ignore tarballs and gigantic files.
        # It would be nice to do this with the HTTP Accept header to avoid
        # one round-trip.  However, most servers seem to ignore the header
        # if you ask for a tarball with Accept: text/html.
        req.get_method = lambda: "HEAD"
        resp = _urlopen(req, timeout=_timeout, context=context)

        content_type = resp.headers.get('Content-type')

    # Do the real GET request when we know it's just HTML.
    req.get_method = lambda: "GET"
    response = _urlopen(req, timeout=_timeout, context=context)

    if accept_content_type and not is_web_url:
        content_type = response.headers.get('Content-type')

    reject_content_type = (
        accept_content_type and (
            content_type is None or
            not content_type.startswith(accept_content_type)))

    if reject_content_type:
        tty.debug("ignoring page {0}{1}{2}".format(
            url_util.format(url),
            " with content type " if content_type is not None else "",
            content_type or ""))

        return None, None, None

    return response.geturl(), response.headers, response


def warn_no_ssl_cert_checking():
    tty.warn("Spack will not check SSL certificates. You need to update "
             "your Python to enable certificate verification.")
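
# Illustrative sketch of calling read_from_url() the way the spider below
# does; the URL is made up and a working network connection is assumed.
# Passing accept_content_type='text/html' triggers the HEAD-then-GET check
# above, so non-HTML resources come back as (None, None, None).
def _example_read_page(url='https://example.com/downloads/'):
    response_url, headers, response = read_from_url(url, 'text/html')
    if response is None:
        return None
    return codecs.getreader('utf-8')(response).read()
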
\"\"\" def", "pages, links = _spider(root, set(), root, 0, depth, False) return pages, links def", "Spack from picking up signature files like: # .asc # .md5 # .sha256", "links page = codecs.getreader('utf-8')(response).read() pages[response_url] = page # Parse out the links in", "old_key, key = key, standardize_header_names(key) changed = changed or key is not old_key", "remote_path.startswith('/'): remote_path = remote_path[1:] s3 = s3_util.create_s3_session(remote_url) s3.upload_file(local_file_path, remote_url.netloc, remote_path, ExtraArgs=extra_args) if not", "group is converted # to a generic wildcard, so we can use this", "= url_util.local_file_path(remote_url) if remote_file_path is not None: mkdirp(os.path.dirname(remote_file_path)) if keep_original: shutil.copy(local_file_path, remote_file_path) else:", "**kwargs): keep_original = kwargs.get('keep_original', True) remote_url = url_util.parse(remote_path) verify_ssl = spack.config.get('config:verify_ssl') if __UNABLE_TO_VERIFY_SSL", "page that look like archive URLs. url_regex = spack.url.wildcard_version(aurl) # We'll be a", "content_type = response.headers.get('Content-type') reject_content_type = ( accept_content_type and ( content_type is None or", "= response.headers.get('Content-type') reject_content_type = ( accept_content_type and ( content_type is None or not", "lambda pyver: ( (pyver < (2, 7, 9)) or ((3,) < pyver <", "that match the wildcards. # Walk through archive_url links first. # Any conflicting", "headers is a string, then it (or an appropriate substitute) is returned. If", "For example: The standard spelling of \"Content-length\" would be substituted for any of", "header names with standardized spellings. Standardizes the spellings of the following header names:", "iter, last_key def _iter_s3_prefix(client, url, num_entries=1024): key = None while True: contents, key", "that of their corresponding item in headers, or if the keys of multiple", "changed or key is not old_key new_dict[key] = value return new_dict if changed", "page # Parse out the links in the page link_parser = LinkParser() subcalls", "remote_path = remote_path[1:] s3 = s3_util.create_s3_session(remote_url) s3.upload_file(local_file_path, remote_url.netloc, remote_path, ExtraArgs=extra_args) if not keep_original:", "response contains the resource represented by the URL try: read_from_url(url) return True except", "= _list_s3_objects( client, url, num_entries, start_after=key) for x in contents: yield x if", "def _spider_wrapper(args): \"\"\"Wrapper for using spider with multiprocessing.\"\"\" return _spider(*args) def _urlopen(req, *args,", "key, value in headers.items(): if isinstance(key, (tuple, six.string_types)): old_key, key = key, standardize_header_names(key)", "urlopen if url_util.parse(url).scheme == 's3': import spack.s3_handler opener = spack.s3_handler.open return opener(req, *args,", "processes\"\"\" Process = NonDaemonProcess else: class NonDaemonContext(type(multiprocessing.get_context())): Process = NonDaemonProcess class NonDaemonPool(multiprocessing.pool.Pool): \"\"\"Pool", "( lambda pyver: ( (pyver < (2, 7, 9)) or ((3,) < pyver", "need to add a $ anchor to the end of the regex to", "- content Length ... and any other header name, such as \"Content-encoding\", would", "two is how the first letters of each word are capitalized; whether words", "new list is returned if at least one of its elements differ from", "that look like archive URLs. 
url_regex = spack.url.wildcard_version(aurl) # We'll be a bit", "content_type or \"\")) return None, None, None return response.geturl(), response.headers, response def warn_no_ssl_cert_checking():", "\"\")) return None, None, None return response.geturl(), response.headers, response def warn_no_ssl_cert_checking(): tty.warn(\"Spack will", "# needs to be done in separate steps. shutil.copy2(local_file_path, remote_file_path) os.remove(local_file_path) elif remote_url.scheme", "the URL. Some web servers require this. additional_list_urls = set() for lurl in", "to check the content type. This lets # us ignore tarballs and gigantic", "directories of archives. Keyword Arguments: list_url (str or None): URL for a listing", "for archive URLs regexes = [] for aurl in archive_urls: # This creates", "continue # Skip already-visited links if abs_link in visited: continue # If we're", "up signature files like: # .asc # .md5 # .sha256 # .sig #", "warning only if the root can't be fetched; it ignores errors with pages", "-> text content. links = set() # set of all links seen on", "LinkParser() subcalls = [] link_parser.feed(page) while link_parser.links: raw_link = link_parser.links.pop() abs_link = url_util.join(", "'ontent', sep, T, 'ype')) for C, sep, T in product('Cc', _separators, 'Tt')), \"Date\":", "non-daemon processes\"\"\" Process = NonDaemonProcess else: class NonDaemonContext(type(multiprocessing.get_context())): Process = NonDaemonProcess class NonDaemonPool(multiprocessing.pool.Pool):", "one of its elements differ from their corrsponding element in headers. If headers", "starting # with versions 2.7.9 and 3.4.3 of Python. if 'context' in kwargs:", "this to extract things # on a page that look like archive URLs.", "# part, not the full path. url_regex = os.path.basename(url_regex) # We need to", "was unable to fetch url list due to a certificate \" \"verification problem.", "reject_content_type: tty.debug(\"ignoring page {0}{1}{2}\".format( url_util.format(url), \" with content type \" if content_type is", "seem to ignore the header # if you ask for a tarball with", "NoNetworkConnectionError(SpackWebError): \"\"\"Raised when an operation can't get an internet connection.\"\"\" def __init__(self, message,", "simple spider. \"\"\" def __init__(self): HTMLParser.__init__(self) self.links = [] def handle_starttag(self, tag, attrs):", "max depth, follow links. if depth < max_depth: subcalls.append((abs_link, visited, root, depth +", "if reject_content_type: tty.debug(\"ignoring page {0}{1}{2}\".format( url_util.format(url), \" with content type \" if content_type", "None if result['IsTruncated']: last_key = result['Contents'][-1]['Key'] iter = (key for key in (", "( HTTP_HEADER_NAME_ALIASES.items()): if headers in other_spellings: if headers == standardized_spelling: return headers return", "content_type.startswith(accept_content_type))) if reject_content_type: tty.debug(\"ignoring page {0}{1}{2}\".format( url_util.format(url), \" with content type \" if", "the HTTP Accept header to avoid # one round-trip. However, most servers seem", "they are so by a dash (-), underscore (_), or space ( ).", "# We don't pass 'context' parameter because it was only introduced starting #", "visited pages. try: response_url, _, response = read_from_url(url, 'text/html') if not response_url or", "tty.debug(e) if hasattr(e, 'reason') and isinstance(e.reason, ssl.SSLError): tty.warn(\"Spack was unable to fetch url", "root URL. 
If depth is specified (e.g., depth=2), then this will also follow", "return None, None, None return response.geturl(), response.headers, response def warn_no_ssl_cert_checking(): tty.warn(\"Spack will not", "are so by a dash (-), underscore (_), or space ( ). Header", "in product('Aa', _separators, 'Rr')), \"Content-length\": set( ''.join((C, 'ontent', sep, L, 'ength')) for C,", "and just look for the archive # part, not the full path. url_regex", "standardized spelling for header[0] that differs from it, then a new tuple is", "changed or item is not old_item new_list.append(item) return new_list if changed else headers", "following header names: - Accept-ranges - Content-length - Content-type - Date - Last-modified", "''.join((L, 'ast', sep, M, 'odified')) for L, sep, M in product('Ll', _separators, 'Mm')),", "this searches the parent directories of archives. Keyword Arguments: list_url (str or None):", "__UNABLE_TO_VERIFY_SSL = ( lambda pyver: ( (pyver < (2, 7, 9)) or ((3,)", "files. # It would be nice to do this with the HTTP Accept", "Parse out the links in the page link_parser = LinkParser() subcalls = []", "regex to prevent # Spack from picking up signature files like: # .asc", "prickly HTML parsing. if sys.version_info[:3] < (2, 7, 3): msg += \" Use", "= spider(lurl, depth=list_depth) pages.update(pg) links.update(lnk) # Scrape them for archive URLs regexes =", "indicates that Python's HTML parser sucks. msg = \"Got an error parsing HTML.\"", "archive_urls + sorted(links): if any(re.search(r, url) for r in regexes): try: ver =", "separated; or, if separated, whether they are so by a dash (-), underscore", "versions of a package. Typically these are just the tarballs from the package", "depth + 1, max_depth, raise_on_error)) visited.add(abs_link) if subcalls: pool = NonDaemonPool(processes=len(subcalls)) try: results", "links that match the wildcards. # Walk through archive_url links first. # Any", "a dash (-), underscore (_), or space ( ). Header names that cannot", "( os.path.relpath(entry['Key'], url.path) for entry in result['Contents'] ) if key != '.') return", "if key != '.') return iter, last_key def _iter_s3_prefix(client, url, num_entries=1024): key =", "# In Python 3, things moved to html.parser from html.parser import HTMLParser #", "traceback from itertools import product import six from six.moves.urllib.request import urlopen, Request from", "at least one of its items has a key that differs from that", "is how the first letters of each word are capitalized; whether words are", "old_item, item = item, standardize_header_names(item) changed = changed or item is not old_item", "'s3': s3 = s3_util.create_s3_session(url) from botocore.exceptions import ClientError try: s3.get_object(Bucket=url.netloc, Key=url.path) return True", "remote_file_path is not None: mkdirp(os.path.dirname(remote_file_path)) if keep_original: shutil.copy(local_file_path, remote_file_path) else: try: os.rename(local_file_path, remote_file_path)", "return response.geturl(), response.headers, response def warn_no_ssl_cert_checking(): tty.warn(\"Spack will not check SSL certificates. You", "None): URL for a listing of archives. 
Spack will scrape these pages for", "} class LinkParser(HTMLParser): \"\"\"This parser just takes an HTML page and strips out", "timeout=_timeout, context=context) content_type = resp.headers.get('Content-type') # Do the real GET request when we", "names: - Accept-ranges - Content-length - Content-type - Date - Last-modified - Server", "spack.util.crypto import spack.util.s3 as s3_util import spack.util.url as url_util from spack.util.compression import ALLOWED_ARCHIVE_TYPES", "with standardized spellings. Standardizes the spellings of the following header names: - Accept-ranges", "max_depth, raise_on_error)) visited.add(abs_link) if subcalls: pool = NonDaemonPool(processes=len(subcalls)) try: results = pool.map(_spider_wrapper, subcalls)", "if changed else headers except (AttributeError, TypeError, ValueError): pass try: changed = False", "opener(req, *args, **kwargs) def spider(root, depth=0): \"\"\"Gets web pages from a root URL.", "timeout=_timeout, context=context) if accept_content_type and not is_web_url: content_type = response.headers.get('Content-type') reject_content_type = (", "children, for much improved performance over a sequential fetch. \"\"\" root = url_util.parse(root)", "= NonDaemonContext() super(NonDaemonPool, self).__init__(*args, **kwargs) def uses_ssl(parsed_url): if parsed_url.scheme == 'https': return True", "in archive_urls: list_urls |= spack.url.find_list_urls(aurl) # Add '/' to the end of the", "True: contents, key = _list_s3_objects( client, url, num_entries, start_after=key) for x in contents:", "- Content-type - Date - Last-modified - Server Every name considered is translated", "url = url_util.parse(url) local_path = url_util.local_file_path(url) if local_path: os.remove(local_path) return if url.scheme ==", "except Exception as e: # Other types of errors are completely ignored, except", "appropriate substitute) is returned. If headers is a non-empty tuple, headers[0] is a", "''.join((C, 'ontent', sep, L, 'ength')) for C, sep, L in product('Cc', _separators, 'Ll')),", "of the above names if the only difference between the two is how", "return (new,) + headers[1:] return headers try: changed = False new_dict = {}", "# NOTE(opadron): The above move failed because it crosses # filesystem boundaries. Copy", "raise NoNetworkConnectionError(str(e), url) except HTMLParseError as e: # This error indicates that Python's", "a tarball. Arguments: archive_urls (str or list or tuple): URL or sequence of", "_spider(root, set(), root, 0, depth, False) return pages, links def find_versions_of_archive(archive_urls, list_url=None, list_depth=0):", "def __init__(self, *args, **kwargs): kwargs['context'] = NonDaemonContext() super(NonDaemonPool, self).__init__(*args, **kwargs) def uses_ssl(parsed_url): if", "word are capitalized; whether words are separated; or, if separated, whether they are", "regardless of spelling. 
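
# Illustrative sketch (directory name made up): for plain filesystem paths
# the helpers above reduce to os.path.exists/os.listdir, so they can be tried
# without S3 credentials or a network connection.
def _example_local_listing(path='/tmp/spack-web-example'):
    mkdirp(path)
    assert url_exists(path)   # file paths resolve to os.path.exists()
    return list_url(path)     # ... and to os.listdir()
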
def _spider(url, visited, root, depth, max_depth, raise_on_error):
    """Fetches URL and any pages it links to up to max_depth.

    depth should initially be zero, and max_depth is the max depth of links
    to follow from the root.

    Prints out a warning only if the root can't be fetched; it ignores
    errors with pages that the root links to.

    Returns a tuple of:
    - pages: dict of pages visited (URL) mapped to their full text.
    - links: set of links encountered while visiting the pages.
    """
    pages = {}     # dict from page URL -> text content.
    links = set()  # set of all links seen on visited pages.

    try:
        response_url, _, response = read_from_url(url, 'text/html')
        if not response_url or not response:
            return pages, links

        page = codecs.getreader('utf-8')(response).read()
        pages[response_url] = page

        # Parse out the links in the page
        link_parser = LinkParser()
        subcalls = []
        link_parser.feed(page)

        while link_parser.links:
            raw_link = link_parser.links.pop()
            abs_link = url_util.join(
                response_url,
                raw_link.strip(),
                resolve_href=True)
            links.add(abs_link)

            # Skip stuff that looks like an archive
            if any(raw_link.endswith(suf) for suf in ALLOWED_ARCHIVE_TYPES):
                continue

            # Skip things outside the root directory
            if not abs_link.startswith(root):
                continue

            # Skip already-visited links
            if abs_link in visited:
                continue

            # If we're not at max depth, follow links.
            if depth < max_depth:
                subcalls.append((abs_link, visited, root,
                                 depth + 1, max_depth, raise_on_error))
                visited.add(abs_link)

        if subcalls:
            pool = NonDaemonPool(processes=len(subcalls))
            try:
                results = pool.map(_spider_wrapper, subcalls)

                for sub_pages, sub_links in results:
                    pages.update(sub_pages)
                    links.update(sub_links)

            finally:
                pool.terminate()
                pool.join()

    except URLError as e:
        tty.debug(e)

        if hasattr(e, 'reason') and isinstance(e.reason, ssl.SSLError):
            tty.warn("Spack was unable to fetch url list due to a certificate "
                     "verification problem. You can try running spack -k, "
                     "which will not check SSL certificates. Use this at your "
                     "own risk.")

        if raise_on_error:
            raise NoNetworkConnectionError(str(e), url)

    except HTMLParseError as e:
        # This error indicates that Python's HTML parser sucks.
        msg = "Got an error parsing HTML."

        # Pre-2.7.3 Pythons in particular have rather prickly HTML parsing.
        if sys.version_info[:3] < (2, 7, 3):
            msg += " Use Python 2.7.3 or newer for better HTML parsing."

        tty.warn(msg, url, "HTMLParseError: " + str(e))

    except Exception as e:
        # Other types of errors are completely ignored, except in debug mode.
        tty.debug("Error in _spider: %s:%s" % (type(e), e),
                  traceback.format_exc())

    return pages, links


def _spider_wrapper(args):
    """Wrapper for using spider with multiprocessing."""
    return _spider(*args)


def _urlopen(req, *args, **kwargs):
    """Wrapper for compatibility with old versions of Python."""
    url = req
    try:
        url = url.get_full_url()
    except AttributeError:
        pass

    # We don't pass 'context' parameter because it was only introduced starting
    # with versions 2.7.9 and 3.4.3 of Python.
    if 'context' in kwargs:
        del kwargs['context']

    opener = urlopen
    if url_util.parse(url).scheme == 's3':
        import spack.s3_handler
        opener = spack.s3_handler.open

    return opener(req, *args, **kwargs)


def spider(root, depth=0):
    """Gets web pages from a root URL.

    If depth is specified (e.g., depth=2), then this will also follow up
    to <depth> levels of links from the root.

    This will spawn processes to fetch the children, for much improved
    performance over a sequential fetch.
    """
    root = url_util.parse(root)
    pages, links = _spider(root, set(), root, 0, depth, False)
    return pages, links
def find_versions_of_archive(archive_urls, list_url=None, list_depth=0):
    """Scrape web pages for new versions of a tarball.

    Arguments:
        archive_urls (str or list or tuple): URL or sequence of URLs for
            different versions of a package. Typically these are just the
            tarballs from the package file itself. By default, this searches
            the parent directories of archives.

    Keyword Arguments:
        list_url (str or None): URL for a listing of archives.
            Spack will scrape these pages for download links that look
            like the archive URL.

        list_depth (int): Max depth to follow links on list_url pages.
            Defaults to 0.
    """
    if not isinstance(archive_urls, (list, tuple)):
        archive_urls = [archive_urls]

    # Generate a list of list_urls based on archive urls and any
    # explicitly listed list_url in the package
    list_urls = set()
    if list_url is not None:
        list_urls.add(list_url)
    for aurl in archive_urls:
        list_urls |= spack.url.find_list_urls(aurl)

    # Add '/' to the end of the URL. Some web servers require this.
    additional_list_urls = set()
    for lurl in list_urls:
        if not lurl.endswith('/'):
            additional_list_urls.add(lurl + '/')
    list_urls |= additional_list_urls

    # Grab some web pages to scrape.
    pages = {}
    links = set()
    for lurl in list_urls:
        pg, lnk = spider(lurl, depth=list_depth)
        pages.update(pg)
        links.update(lnk)

    # Scrape them for archive URLs
    regexes = []
    for aurl in archive_urls:
        # This creates a regex from the URL with a capture group for
        # the version part of the URL.  The capture group is converted
        # to a generic wildcard, so we can use this to extract things
        # on a page that look like archive URLs.
        url_regex = spack.url.wildcard_version(aurl)

        # We'll be a bit more liberal and just look for the archive
        # part, not the full path.
        url_regex = os.path.basename(url_regex)

        # We need to add a / to the beginning of the regex to prevent
        # Spack from picking up similarly named packages like:
        #   https://cran.r-project.org/src/contrib/pls_2.6-0.tar.gz
        #   https://cran.r-project.org/src/contrib/enpls_5.7.tar.gz
        #   https://cran.r-project.org/src/contrib/autopls_1.3.tar.gz
        #   https://cran.r-project.org/src/contrib/matrixpls_1.0.4.tar.gz
        url_regex = '/' + url_regex

        # We need to add a $ anchor to the end of the regex to prevent
        # Spack from picking up signature files like:
        #   .asc
        #   .md5
        #   .sha256
        #   .sig
        # However, SourceForge downloads still need to end in '/download'.
        url_regex += r'(\/download)?$'

        regexes.append(url_regex)

    # Build a dict version -> URL from any links that match the wildcards.
    # Walk through archive_url links first.
    # Any conflicting versions will be overwritten by the list_url links.
    versions = {}
    for url in archive_urls + sorted(links):
        if any(re.search(r, url) for r in regexes):
            try:
                ver = spack.url.parse_version(url)
                versions[ver] = url
            except spack.url.UndetectableVersionError:
                continue

    return versions
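
# Illustrative sketch (URLs made up): given one known tarball, scrape its
# parent directory and an explicit listing page for other releases. The
# result maps detected versions to their download URLs.
def _example_find_versions():
    return find_versions_of_archive(
        'https://example.com/downloads/foo-1.2.3.tar.gz',
        list_url='https://example.com/downloads/',
        list_depth=1)
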
By default, this searches the parent directories", "Date - Last-modified - Server Every name considered is translated to one of", "headers[0] if isinstance(old, six.string_types): new = standardize_header_names(old) if old is not new: return", "(list, tuple)): archive_urls = [archive_urls] # Generate a list of list_urls based on", "content_type = resp.headers.get('Content-type') # Do the real GET request when we know it's", "any(raw_link.endswith(suf) for suf in ALLOWED_ARCHIVE_TYPES): continue # Skip things outside the root directory", "the max depth of links to follow from the root. Prints out a", "can't be fetched; it ignores errors with pages that the root links to.", "if raise_on_error: raise NoNetworkConnectionError(str(e), url) except HTMLParseError as e: # This error indicates", "don't pass 'context' parameter because it was only introduced starting # with versions", "for C, sep, T in product('Cc', _separators, 'Tt')), \"Date\": set(('Date', 'date')), \"Last-modified\": set(", "= url_util.parse(url) local_path = url_util.local_file_path(url) if local_path: return os.listdir(local_path) if url.scheme == 's3':", "additional_list_urls # Grab some web pages to scrape. pages = {} links =", "URL or sequence of URLs for different versions of a package. Typically these", "URL and any pages it links to up to max_depth. depth should initially", "2 had these in the HTMLParser package. from HTMLParser import HTMLParser, HTMLParseError except", "if abs_link in visited: continue # If we're not at max depth, follow", "or tuple): URL or sequence of URLs for different versions of a package.", "traceback.format_exc()) return pages, links def _spider_wrapper(args): \"\"\"Wrapper for using spider with multiprocessing.\"\"\" return", "If depth is specified (e.g., depth=2), then this will also follow up to", "and isinstance(e.reason, ssl.SSLError): tty.warn(\"Spack was unable to fetch url list due to a", "'/') list_urls |= additional_list_urls # Grab some web pages to scrape. pages =", "to <depth> levels of links from the root. This will spawn processes to", "if sys.version_info[0] < 3: class NonDaemonPool(multiprocessing.pool.Pool): \"\"\"Pool that uses non-daemon processes\"\"\" Process =", "response_url, _, response = read_from_url(url, 'text/html') if not response_url or not response: return", "The capture group is converted # to a generic wildcard, so we can", "# We need to add a $ anchor to the end of the", "It would be nice to do this with the HTTP Accept header to", "pages to scrape. pages = {} links = set() for lurl in list_urls:", "def _list_s3_objects(client, url, num_entries, start_after=None): list_args = dict( Bucket=url.netloc, Prefix=url.path, MaxKeys=num_entries) if start_after", "to enable certificate verification.\") def push_to_url(local_file_path, remote_path, **kwargs): keep_original = kwargs.get('keep_original', True) remote_url", "\"\"\" root = url_util.parse(root) pages, links = _spider(root, set(), root, 0, depth, False)", "== 's3': s3 = s3_util.create_s3_session(url) return list(set( key.split('/', 1)[0] for key in _iter_s3_prefix(s3,", "try: url = url.get_full_url() except AttributeError: pass # We don't pass 'context' parameter", "header # if you ask for a tarball with Accept: text/html. 
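# (Illustrative aside, not something this module actually does: the
# Accept-header variant mentioned above would look roughly like
#     req.add_header('Accept', 'text/html')
#     response = _urlopen(req, timeout=_timeout, context=context)
# i.e. a single request instead of HEAD-then-GET, but because servers
# commonly ignore the header, the explicit HEAD probe below is used.)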
req.get_method =", "headers is a sequence, then a new list is considered, where each element", "# non-throwing response contains the resource represented by the URL try: read_from_url(url) return", "class NonDaemonContext(type(multiprocessing.get_context())): Process = NonDaemonProcess class NonDaemonPool(multiprocessing.pool.Pool): \"\"\"Pool that uses non-daemon processes\"\"\" def", "User wants SSL verification, but it cannot be provided. warn_no_ssl_cert_checking() else: # User", "'/download'. url_regex += r'(\\/download)?$' regexes.append(url_regex) # Build a dict version -> URL from", "s3_util.create_s3_session(url) from botocore.exceptions import ClientError try: s3.get_object(Bucket=url.netloc, Key=url.path) return True except ClientError as", "HTMLParseError(Exception): pass from llnl.util.filesystem import mkdirp import llnl.util.tty as tty import spack.cmd import", "will scrape these pages for download links that look like the archive URL.", "import spack.util.s3 as s3_util import spack.util.url as url_util from spack.util.compression import ALLOWED_ARCHIVE_TYPES #", "_spider_wrapper(args): \"\"\"Wrapper for using spider with multiprocessing.\"\"\" return _spider(*args) def _urlopen(req, *args, **kwargs):", "sub_links in results: pages.update(sub_pages) links.update(sub_links) finally: pool.terminate() pool.join() except URLError as e: tty.debug(e)", "URL. Some web servers require this. additional_list_urls = set() for lurl in list_urls:", "of links from the root. This will spawn processes to fetch the children,", "be provided. context = ssl.create_default_context() else: # User has explicitly indicated that they", "dict from page URL -> text content. links = set() # set of", "any of the following names: - Content-length - content_length - contentlength - content_Length", "archive_url links first. # Any conflicting versions will be overwritten by the list_url", "dict is considered, where the key in each item is the key of", "isinstance(headers, tuple): if not headers: return headers old = headers[0] if isinstance(old, six.string_types):", "return _spider(*args) def _urlopen(req, *args, **kwargs): \"\"\"Wrapper for compatibility with old versions of", "\" \"verification problem. You can try running spack -k, \" \"which will not", "((3,) < pyver < (3, 4, 3)) ))(sys.version_info) def read_from_url(url, accept_content_type=None): url =", "it, then a new tuple is returned. This tuple has the same elements", "raise err # otherwise, just try to \"read\" from the URL, and assume", "return False raise err # otherwise, just try to \"read\" from the URL,", "stuff that looks like an archive if any(raw_link.endswith(suf) for suf in ALLOWED_ARCHIVE_TYPES): continue", "performance over a sequential fetch. \"\"\" root = url_util.parse(root) pages, links = _spider(root,", "you ask for a tarball with Accept: text/html. req.get_method = lambda: \"HEAD\" resp", "if not response_url or not response: return pages, links page = codecs.getreader('utf-8')(response).read() pages[response_url]", "if content_type is not None else \"\", content_type or \"\")) return None, None,", "result = client.list_objects_v2(**list_args) last_key = None if result['IsTruncated']: last_key = result['Contents'][-1]['Key'] iter =", "HTTP_HEADER_NAME_ALIASES.items()): if headers in other_spellings: if headers == standardized_spelling: return headers return standardized_spelling", "C, sep, T in product('Cc', _separators, 'Tt')), \"Date\": set(('Date', 'date')), \"Last-modified\": set( ''.join((L,", "just HTML. 
req.get_method = lambda: \"GET\" response = _urlopen(req, timeout=_timeout, context=context) if accept_content_type", "new_dict = {} for key, value in headers.items(): if isinstance(key, (tuple, six.string_types)): old_key,", "links that look like the archive URL. list_depth (int): Max depth to follow", "a new list is considered, where each element is its corresponding element in", "one of the above names if the only difference between the two is", "uses_ssl(parsed_url): if parsed_url.scheme == 'https': return True if parsed_url.scheme == 's3': endpoint_url =", "beginning of the regex to prevent # Spack from picking up similarly named", "links seen on visited pages. try: response_url, _, response = read_from_url(url, 'text/html') if", "map to the same key after being standardized, then the value for the", "do this with the HTTP Accept header to avoid # one round-trip. However,", "signature files like: # .asc # .md5 # .sha256 # .sig # However,", "space ( ). Header names that cannot be mapped as described above are", "= resp.headers.get('Content-type') # Do the real GET request when we know it's just", "s3_util import spack.util.url as url_util from spack.util.compression import ALLOWED_ARCHIVE_TYPES # Timeout in seconds", "parsed_url.scheme == 'https': return True if parsed_url.scheme == 's3': endpoint_url = os.environ.get('S3_ENDPOINT_URL') if", "= pool.map(_spider_wrapper, subcalls) for sub_pages, sub_links in results: pages.update(sub_pages) links.update(sub_links) finally: pool.terminate() pool.join()", "on list_url pages. Defaults to 0. \"\"\" if not isinstance(archive_urls, (list, tuple)): archive_urls", "sucks. msg = \"Got an error parsing HTML.\" # Pre-2.7.3 Pythons in particular", "ask for a tarball with Accept: text/html. req.get_method = lambda: \"HEAD\" resp =", "Defaults to 0. \"\"\" if not isinstance(archive_urls, (list, tuple)): archive_urls = [archive_urls] #", "*can* be provided. context = ssl.create_default_context() else: # User has explicitly indicated that", "HTMLParser.__init__(self) self.links = [] def handle_starttag(self, tag, attrs): if tag == 'a': for", "top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) from __future__", "as \"Content-encoding\", would not be altered, regardless of spelling. If headers is a", "URL. If depth is specified (e.g., depth=2), then this will also follow up", "also follow up to <depth> levels of links from the root. This will", "context unless the URL scheme is one that uses # SSL certs. if", "{}) remote_path = remote_url.path while remote_path.startswith('/'): remote_path = remote_path[1:] s3 = s3_util.create_s3_session(remote_url) s3.upload_file(local_file_path,", "# We'll be a bit more liberal and just look for the archive", "add a / to the beginning of the regex to prevent # Spack", "sorted(links): if any(re.search(r, url) for r in regexes): try: ver = spack.url.parse_version(url) versions[ver]", "element in headers. If headers is a mapping, then a new dict is", "try: read_from_url(url) return True except URLError: return False def remove_url(url): url = url_util.parse(url)", "docstring for standardize_header_names() _separators = ('', ' ', '_', '-') HTTP_HEADER_NAME_ALIASES = {", "of their corresponding item in headers, or if the keys of multiple items", "list is considered, where each element is its corresponding element in headers, but", "for key in _iter_s3_prefix(s3, url))) def _spider(url, visited, root, depth, max_depth, raise_on_error): \"\"\"Fetches", "gigantic files. 
# It would be nice to do this with the HTTP", "= url_util.parse(url) local_path = url_util.local_file_path(url) if local_path: return os.path.exists(local_path) if url.scheme == 's3':", "are completely ignored, except in debug mode. tty.debug(\"Error in _spider: %s:%s\" % (type(e),", "certain header names with standardized spellings. Standardizes the spellings of the following header", "new_list.append(item) return new_list if changed else headers except TypeError: pass return headers class", "names that cannot be mapped as described above are returned unaltered. For example:", "Content-type - Date - Last-modified - Server Every name considered is translated to", "in archive_urls + sorted(links): if any(re.search(r, url) for r in regexes): try: ver", "that uses # SSL certs. if uses_ssl(url): if verify_ssl: if __UNABLE_TO_VERIFY_SSL: # User", "and it *can* be provided. context = ssl.create_default_context() else: # User has explicitly", "out the hrefs on the links. Good enough for a really simple spider.", "# .sig # However, SourceForge downloads still need to end in '/download'. url_regex", "root. This will spawn processes to fetch the children, for much improved performance", "group for # the version part of the URL. The capture group is", "= [] link_parser.feed(page) while link_parser.links: raw_link = link_parser.links.pop() abs_link = url_util.join( response_url, raw_link.strip(),", "or not content_type.startswith(accept_content_type))) if reject_content_type: tty.debug(\"ignoring page {0}{1}{2}\".format( url_util.format(url), \" with content type", "the archive URL. list_depth (int): Max depth to follow links on list_url pages.", "is specified (e.g., depth=2), then this will also follow up to <depth> levels", "other header name, such as \"Content-encoding\", would not be altered, regardless of spelling.", "try: response_url, _, response = read_from_url(url, 'text/html') if not response_url or not response:", "URLError as e: tty.debug(e) if hasattr(e, 'reason') and isinstance(e.reason, ssl.SSLError): tty.warn(\"Spack was unable", "and max_depth is the max depth of links to follow from the root.", "their corrsponding element in headers. If headers is a mapping, then a new", "= os.environ.get('S3_ENDPOINT_URL') if not endpoint_url: return True if url_util.parse(endpoint_url, scheme='https').scheme == 'https': return", "shutil import ssl import sys import traceback from itertools import product import six", "Max depth to follow links on list_url pages. Defaults to 0. 
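    Returns:
        dict: mapping of each detected version to the URL it was parsed from.

    Example (an illustrative sketch only; the URL and result shown are
    hypothetical, not taken from a real package)::

        versions = find_versions_of_archive(
            'https://example.com/foo/foo-1.0.tar.gz', list_depth=1)
        # e.g. {Version('1.0'): 'https://example.com/foo/foo-1.0.tar.gz', ...}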
\"\"\" if", "for sub_pages, sub_links in results: pages.update(sub_pages) links.update(sub_links) finally: pool.terminate() pool.join() except URLError as", "except URLError as e: tty.debug(e) if hasattr(e, 'reason') and isinstance(e.reason, ssl.SSLError): tty.warn(\"Spack was", "s3 = s3_util.create_s3_session(remote_url) s3.upload_file(local_file_path, remote_url.netloc, remote_path, ExtraArgs=extra_args) if not keep_original: os.remove(local_file_path) else: raise", "old = headers[0] if isinstance(old, six.string_types): new = standardize_header_names(old) if old is not", "\"\"\"Process that allows sub-processes, so pools can have sub-pools.\"\"\" @property def daemon(self): return", "mapping, then a new dict is considered, where the key in each item", "is not new: return (new,) + headers[1:] return headers try: changed = False", "= req try: url = url.get_full_url() except AttributeError: pass # We don't pass", "url_util.format(url), \" with content type \" if content_type is not None else \"\",", "pass if sys.version_info[0] < 3: class NonDaemonPool(multiprocessing.pool.Pool): \"\"\"Pool that uses non-daemon processes\"\"\" Process", ".md5 # .sha256 # .sig # However, SourceForge downloads still need to end", "= {} for url in archive_urls + sorted(links): if any(re.search(r, url) for r", "tty.warn(\"Spack will not check SSL certificates. You need to update \" \"your Python", "url_regex += r'(\\/download)?$' regexes.append(url_regex) # Build a dict version -> URL from any", "|= additional_list_urls # Grab some web pages to scrape. pages = {} links", "like archive URLs. url_regex = spack.url.wildcard_version(aurl) # We'll be a bit more liberal", "follow up to <depth> levels of links from the root. This will spawn", "max_depth. depth should initially be zero, and max_depth is the max depth of", "resp.headers.get('Content-type') # Do the real GET request when we know it's just HTML.", "at your \" \"own risk.\") if raise_on_error: raise NoNetworkConnectionError(str(e), url) except HTMLParseError as", "return list(set( key.split('/', 1)[0] for key in _iter_s3_prefix(s3, url))) def _spider(url, visited, root,", "Pre-2.7.3 Pythons in particular have rather prickly HTML parsing. if sys.version_info[:3] < (2,", "accept_content_type and not is_web_url: content_type = response.headers.get('Content-type') reject_content_type = ( accept_content_type and (", "url_exists(url): url = url_util.parse(url) local_path = url_util.local_file_path(url) if local_path: return os.path.exists(local_path) if url.scheme", "standardized, then the value for the resulting item is undefined. The new dict", "codecs import errno import re import os import os.path import shutil import ssl", "if not endpoint_url: return True if url_util.parse(endpoint_url, scheme='https').scheme == 'https': return True return", "if 'context' in kwargs: del kwargs['context'] opener = urlopen if url_util.parse(url).scheme == 's3':", "\"Accept-ranges\": set( ''.join((A, 'ccept', sep, R, 'anges')) for A, sep, R in product('Aa',", "Use this at your \" \"own risk.\") if raise_on_error: raise NoNetworkConnectionError(str(e), url) except", "processes\"\"\" def __init__(self, *args, **kwargs): kwargs['context'] = NonDaemonContext() super(NonDaemonPool, self).__init__(*args, **kwargs) def uses_ssl(parsed_url):", "a sequential fetch. \"\"\" root = url_util.parse(root) pages, links = _spider(root, set(), root,", "a listing of archives. Spack will scrape these pages for download links that", "with pages that the root links to. 
Returns a tuple of: - pages:", "# Spack Project Developers. See the top-level COPYRIGHT file for details. # #", "not want SSL # verification. context = ssl._create_unverified_context() req = Request(url_util.format(url)) content_type =", "header name, such as \"Content-encoding\", would not be altered, regardless of spelling. If", "True except URLError: return False def remove_url(url): url = url_util.parse(url) local_path = url_util.local_file_path(url)", "!= '.') return iter, last_key def _iter_s3_prefix(client, url, num_entries=1024): key = None while", "class NonDaemonPool(multiprocessing.pool.Pool): \"\"\"Pool that uses non-daemon processes\"\"\" def __init__(self, *args, **kwargs): kwargs['context'] =", "conflicting versions will be overwritten by the list_url links. versions = {} for", "client.list_objects_v2(**list_args) last_key = None if result['IsTruncated']: last_key = result['Contents'][-1]['Key'] iter = (key for", "tag == 'a': for attr, val in attrs: if attr == 'href': self.links.append(val)", "= url_util.parse(url) local_path = url_util.local_file_path(url) if local_path: os.remove(local_path) return if url.scheme == 's3':", "import urlopen, Request from six.moves.urllib.error import URLError import multiprocessing.pool try: # Python 2", "\"verification problem. You can try running spack -k, \" \"which will not check", "key after being standardized, then the value for the resulting item is undefined.", "new_dict if changed else headers except (AttributeError, TypeError, ValueError): pass try: changed =", "class NoNetworkConnectionError(SpackWebError): \"\"\"Raised when an operation can't get an internet connection.\"\"\" def __init__(self,", "to \"read\" from the URL, and assume that *any* # non-throwing response contains", "local_path: return os.listdir(local_path) if url.scheme == 's3': s3 = s3_util.create_s3_session(url) return list(set( key.split('/',", "def url_exists(url): url = url_util.parse(url) local_path = url_util.local_file_path(url) if local_path: return os.path.exists(local_path) if", "of its corresponding item in headers, mapped as above if a string or", "not check SSL certificates. You need to update \" \"your Python to enable", "def __init__(self, message, url): super(NoNetworkConnectionError, self).__init__( \"No network connection: \" + str(message), \"URL", "names: - Content-length - content_length - contentlength - content_Length - contentLength - content", "names if the only difference between the two is how the first letters", "{ \"Accept-ranges\": set( ''.join((A, 'ccept', sep, R, 'anges')) for A, sep, R in", "pages, links def _spider_wrapper(args): \"\"\"Wrapper for using spider with multiprocessing.\"\"\" return _spider(*args) def", "if headers in other_spellings: if headers == standardized_spelling: return headers return standardized_spelling return", "lurl.endswith('/'): additional_list_urls.add(lurl + '/') list_urls |= additional_list_urls # Grab some web pages to", "msg += \" Use Python 2.7.3 or newer for better HTML parsing.\" tty.warn(msg,", "or not response: return pages, links page = codecs.getreader('utf-8')(response).read() pages[response_url] = page #", "if keep_original: shutil.copy(local_file_path, remote_file_path) else: try: os.rename(local_file_path, remote_file_path) except OSError as e: if", "your \" \"own risk.\") if raise_on_error: raise NoNetworkConnectionError(str(e), url) except HTMLParseError as e:", "or sequence of URLs for different versions of a package. Typically these are", "for the archive # part, not the full path. 
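# (Illustrative: for a hypothetical pattern matching
# 'https://example.com/downloads/foo-<version>.tar.gz', basename() keeps only
# the part after the last '/', i.e. the 'foo-<version>.tar.gz' piece, so the
# match is against the archive filename rather than the whole path.)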
url_regex = os.path.basename(url_regex) #", "import HTMLParser, HTMLParseError except ImportError: # In Python 3, things moved to html.parser", "isinstance(archive_urls, (list, tuple)): archive_urls = [archive_urls] # Generate a list of list_urls based", "the value for the resulting item is undefined. The new dict is returned", "or key is not old_key new_dict[key] = value return new_dict if changed else", "lnk = spider(lurl, depth=list_depth) pages.update(pg) links.update(lnk) # Scrape them for archive URLs regexes", "other_spellings in ( HTTP_HEADER_NAME_ALIASES.items()): if headers in other_spellings: if headers == standardized_spelling: return", "{SCHEME}'.format( SCHEME=remote_url.scheme)) def url_exists(url): url = url_util.parse(url) local_path = url_util.local_file_path(url) if local_path: return", "and never raised. class HTMLParseError(Exception): pass from llnl.util.filesystem import mkdirp import llnl.util.tty as", "= url_util.parse(url) context = None verify_ssl = spack.config.get('config:verify_ssl') # Don't even bother with", "the header # if you ask for a tarball with Accept: text/html. req.get_method", "remove_url(url): url = url_util.parse(url) local_path = url_util.local_file_path(url) if local_path: os.remove(local_path) return if url.scheme", "= key, standardize_header_names(key) changed = changed or key is not old_key new_dict[key] =", "= {} links = set() for lurl in list_urls: pg, lnk = spider(lurl,", "( content_type is None or not content_type.startswith(accept_content_type))) if reject_content_type: tty.debug(\"ignoring page {0}{1}{2}\".format( url_util.format(url),", "the same elements as headers, except the first element is the standardized spelling", "os.path import shutil import ssl import sys import traceback from itertools import product", "and gigantic files. # It would be nice to do this with the", "Other types of errors are completely ignored, except in debug mode. tty.debug(\"Error in", "ignored, except in debug mode. tty.debug(\"Error in _spider: %s:%s\" % (type(e), e), traceback.format_exc())", "key of its corresponding item in headers, mapped as above if a string", "after being standardized, then the value for the resulting item is undefined. The", "dict( Bucket=url.netloc, Prefix=url.path, MaxKeys=num_entries) if start_after is not None: list_args['StartAfter'] = start_after result", "the keys of multiple items in headers map to the same key after", "map to the same key after being standardized. In all other cases headers", "list due to a certificate \" \"verification problem. You can try running spack", "for aurl in archive_urls: # This creates a regex from the URL with", "verify_ssl: if __UNABLE_TO_VERIFY_SSL: # User wants SSL verification, but it cannot be provided.", "def standardize_header_names(headers): \"\"\"Replace certain header names with standardized spellings. Standardizes the spellings of", "if tag == 'a': for attr, val in attrs: if attr == 'href':", "import mkdirp import llnl.util.tty as tty import spack.cmd import spack.config import spack.error import", "would be substituted for any of the following names: - Content-length - content_length", "<depth> levels of links from the root. This will spawn processes to fetch", "headers, except the first element is the standardized spelling for headers[0]. 
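    (An illustrative case, not taken from the original text: passing the tuple
    ('content_length', '1024') would yield ('Content-length', '1024'), since
    only the first element is re-spelled.)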
If headers", "newer for better HTML parsing.\" tty.warn(msg, url, \"HTMLParseError: \" + str(e)) except Exception", "(tuple, six.string_types)): old_key, key = key, standardize_header_names(key) changed = changed or key is", "substitute) is returned. If headers is a non-empty tuple, headers[0] is a string,", "= None is_web_url = url.scheme in ('http', 'https') if accept_content_type and is_web_url: #", "_urlopen(req, timeout=_timeout, context=context) content_type = resp.headers.get('Content-type') # Do the real GET request when", "x if not key: break def list_url(url): url = url_util.parse(url) local_path = url_util.local_file_path(url)", "|= spack.url.find_list_urls(aurl) # Add '/' to the end of the URL. Some web", "list_urls |= additional_list_urls # Grab some web pages to scrape. pages = {}", "# .asc # .md5 # .sha256 # .sig # However, SourceForge downloads still", "being standardized. In all other cases headers is returned unaltered. \"\"\" if isinstance(headers,", "headers. If headers is a mapping, then a new dict is considered, where", "rather prickly HTML parsing. if sys.version_info[:3] < (2, 7, 3): msg += \"", "and any pages it links to up to max_depth. depth should initially be", "not lurl.endswith('/'): additional_list_urls.add(lurl + '/') list_urls |= additional_list_urls # Grab some web pages", "None is_web_url = url.scheme in ('http', 'https') if accept_content_type and is_web_url: # Make", "to one of the above names if the only difference between the two", "remote_url.scheme == 's3': extra_args = kwargs.get('extra_args', {}) remote_path = remote_url.path while remote_path.startswith('/'): remote_path", "there exists a standardized spelling for header[0] that differs from it, then a", "as tty import spack.cmd import spack.config import spack.error import spack.url import spack.util.crypto import", "same key after being standardized. In all other cases headers is returned unaltered.", "= kwargs.get('keep_original', True) remote_url = url_util.parse(remote_path) verify_ssl = spack.config.get('config:verify_ssl') if __UNABLE_TO_VERIFY_SSL and verify_ssl", "warn_no_ssl_cert_checking() remote_file_path = url_util.local_file_path(remote_url) if remote_file_path is not None: mkdirp(os.path.dirname(remote_file_path)) if keep_original: shutil.copy(local_file_path,", "'/' to the end of the URL. Some web servers require this. additional_list_urls", "the first letters of each word are capitalized; whether words are separated; or,", "Do the real GET request when we know it's just HTML. req.get_method =", "spack.error import spack.url import spack.util.crypto import spack.util.s3 as s3_util import spack.util.url as url_util", "each word are capitalized; whether words are separated; or, if separated, whether they", "details. 
# # SPDX-License-Identifier: (Apache-2.0 OR MIT) from __future__ import print_function import codecs", "raw_link = link_parser.links.pop() abs_link = url_util.join( response_url, raw_link.strip(), resolve_href=True) links.add(abs_link) # Skip stuff", "spack.url import spack.util.crypto import spack.util.s3 as s3_util import spack.util.url as url_util from spack.util.compression", "\"\"\"This parser just takes an HTML page and strips out the hrefs on", "verify_ssl = spack.config.get('config:verify_ssl') if __UNABLE_TO_VERIFY_SSL and verify_ssl and uses_ssl(remote_url): warn_no_ssl_cert_checking() remote_file_path = url_util.local_file_path(remote_url)", "standardized_spelling return headers if isinstance(headers, tuple): if not headers: return headers old =", "where the key in each item is the key of its corresponding item", "\" \"your Python to enable certificate verification.\") def push_to_url(local_file_path, remote_path, **kwargs): keep_original =", "= set() if list_url is not None: list_urls.add(list_url) for aurl in archive_urls: list_urls", "url except spack.url.UndetectableVersionError: continue return versions def standardize_header_names(headers): \"\"\"Replace certain header names with", "list_url pages. Defaults to 0. \"\"\" if not isinstance(archive_urls, (list, tuple)): archive_urls =", "try: ver = spack.url.parse_version(url) versions[ver] = url except spack.url.UndetectableVersionError: continue return versions def", "of a tarball. Arguments: archive_urls (str or list or tuple): URL or sequence", "if isinstance(key, (tuple, six.string_types)): old_key, key = key, standardize_header_names(key) changed = changed or", "new list is considered, where each element is its corresponding element in headers,", "the root directory if not abs_link.startswith(root): continue # Skip already-visited links if abs_link", "class LinkParser(HTMLParser): \"\"\"This parser just takes an HTML page and strips out the", "spider(root, depth=0): \"\"\"Gets web pages from a root URL. If depth is specified", "like: # https://cran.r-project.org/src/contrib/pls_2.6-0.tar.gz # https://cran.r-project.org/src/contrib/enpls_5.7.tar.gz # https://cran.r-project.org/src/contrib/autopls_1.3.tar.gz # https://cran.r-project.org/src/contrib/matrixpls_1.0.4.tar.gz url_regex = '/' +", "OSError as e: if e.errno == errno.EXDEV: # NOTE(opadron): The above move failed", "tuple of: - pages: dict of pages visited (URL) mapped to their full", "Process = NonDaemonProcess else: class NonDaemonContext(type(multiprocessing.get_context())): Process = NonDaemonProcess class NonDaemonPool(multiprocessing.pool.Pool): \"\"\"Pool that", "to a generic wildcard, so we can use this to extract things #", "We need to add a / to the beginning of the regex to", "file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) from __future__ import print_function", "the root can't be fetched; it ignores errors with pages that the root", "errors are completely ignored, except in debug mode. tty.debug(\"Error in _spider: %s:%s\" %", "keep_original: os.remove(local_file_path) else: raise NotImplementedError( 'Unrecognized URL scheme: {SCHEME}'.format( SCHEME=remote_url.scheme)) def url_exists(url): url", "e: # This error indicates that Python's HTML parser sucks. msg = \"Got", "element in headers, but mapped as above if a string or tuple. 
This", "headers == standardized_spelling: return headers return standardized_spelling return headers if isinstance(headers, tuple): if", "# Timeout in seconds for web requests _timeout = 10 # See docstring", "for a really simple spider. \"\"\" def __init__(self): HTMLParser.__init__(self) self.links = [] def", "ignore the header # if you ask for a tarball with Accept: text/html.", "\"read\" from the URL, and assume that *any* # non-throwing response contains the", "\"\"\" def __init__(self): HTMLParser.__init__(self) self.links = [] def handle_starttag(self, tag, attrs): if tag", "return if url.scheme == 's3': s3 = s3_util.create_s3_session(url) s3.delete_object(Bucket=url.s3_bucket, Key=url.path) return # Don't", "would not be altered, regardless of spelling. If headers is a string, then", "servers require this. additional_list_urls = set() for lurl in list_urls: if not lurl.endswith('/'):", "dict is returned if at least one of its items has a key", "error indicates that Python's HTML parser sucks. msg = \"Got an error parsing", "part of the URL. The capture group is converted # to a generic", "SSL verification, but it cannot be provided. warn_no_ssl_cert_checking() else: # User wants SSL", "by the list_url links. versions = {} for url in archive_urls + sorted(links):", "HTMLParseError except ImportError: # In Python 3, things moved to html.parser from html.parser", "push_to_url(local_file_path, remote_path, **kwargs): keep_original = kwargs.get('keep_original', True) remote_url = url_util.parse(remote_path) verify_ssl = spack.config.get('config:verify_ssl')", "= [] def handle_starttag(self, tag, attrs): if tag == 'a': for attr, val", "', '_', '-') HTTP_HEADER_NAME_ALIASES = { \"Accept-ranges\": set( ''.join((A, 'ccept', sep, R, 'anges'))", "for key in ( os.path.relpath(entry['Key'], url.path) for entry in result['Contents'] ) if key", "= spack.url.wildcard_version(aurl) # We'll be a bit more liberal and just look for", "resulting item is undefined. The new dict is returned if at least one", "links = set() for lurl in list_urls: pg, lnk = spider(lurl, depth=list_depth) pages.update(pg)", "Content-length - Content-type - Date - Last-modified - Server Every name considered is", "would be nice to do this with the HTTP Accept header to avoid", "None, None, None return response.geturl(), response.headers, response def warn_no_ssl_cert_checking(): tty.warn(\"Spack will not check", "= {} # dict from page URL -> text content. links = set()", "url = url.get_full_url() except AttributeError: pass # We don't pass 'context' parameter because", "Request(url_util.format(url)) content_type = None is_web_url = url.scheme in ('http', 'https') if accept_content_type and", "Pythons in particular have rather prickly HTML parsing. if sys.version_info[:3] < (2, 7,", "versions of Python.\"\"\" url = req try: url = url.get_full_url() except AttributeError: pass", "pages. try: response_url, _, response = read_from_url(url, 'text/html') if not response_url or not", "# User has explicitly indicated that they do not want SSL # verification.", "the package list_urls = set() if list_url is not None: list_urls.add(list_url) for aurl", "archive URLs. url_regex = spack.url.wildcard_version(aurl) # We'll be a bit more liberal and", "links. Good enough for a really simple spider. \"\"\" def __init__(self): HTMLParser.__init__(self) self.links", "itself. By default, this searches the parent directories of archives. Keyword Arguments: list_url", "URL. list_depth (int): Max depth to follow links on list_url pages. 
Defaults to", "Spack web spidering errors.\"\"\" class NoNetworkConnectionError(SpackWebError): \"\"\"Raised when an operation can't get an", "\" \"own risk.\") if raise_on_error: raise NoNetworkConnectionError(str(e), url) except HTMLParseError as e: #", "You need to update \" \"your Python to enable certificate verification.\") def push_to_url(local_file_path,", "url = req try: url = url.get_full_url() except AttributeError: pass # We don't", "# Add '/' to the end of the URL. Some web servers require", "it (or an appropriate substitute) is returned. If headers is a non-empty tuple,", "set( ''.join((C, 'ontent', sep, T, 'ype')) for C, sep, T in product('Cc', _separators,", "capture group is converted # to a generic wildcard, so we can use", "= url_util.join( response_url, raw_link.strip(), resolve_href=True) links.add(abs_link) # Skip stuff that looks like an", "assume that *any* # non-throwing response contains the resource represented by the URL", "these are just the tarballs from the package file itself. By default, this", "Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR", "from six.moves.urllib.request import urlopen, Request from six.moves.urllib.error import URLError import multiprocessing.pool try: #", "import spack.util.url as url_util from spack.util.compression import ALLOWED_ARCHIVE_TYPES # Timeout in seconds for", "= s3_util.create_s3_session(remote_url) s3.upload_file(local_file_path, remote_url.netloc, remote_path, ExtraArgs=extra_args) if not keep_original: os.remove(local_file_path) else: raise NotImplementedError(", "is returned. This tuple has the same elements as headers, except the first", "and strips out the hrefs on the links. Good enough for a really", "real GET request when we know it's just HTML. req.get_method = lambda: \"GET\"", "tuple)): archive_urls = [archive_urls] # Generate a list of list_urls based on archive", "request first to check the content type. This lets # us ignore tarballs", "HEAD request first to check the content type. This lets # us ignore", "''.join((A, 'ccept', sep, R, 'anges')) for A, sep, R in product('Aa', _separators, 'Rr')),", "spack.config.get('config:verify_ssl') # Don't even bother with a context unless the URL scheme is", "uses non-daemon processes\"\"\" Process = NonDaemonProcess else: class NonDaemonContext(type(multiprocessing.get_context())): Process = NonDaemonProcess class", "- Last-modified - Server Every name considered is translated to one of the", "regexes): try: ver = spack.url.parse_version(url) versions[ver] = url except spack.url.UndetectableVersionError: continue return versions", "from the URL with a capture group for # the version part of", "above if a string or tuple. This new list is returned if at", "in debug mode. tty.debug(\"Error in _spider: %s:%s\" % (type(e), e), traceback.format_exc()) return pages,", "from picking up similarly named packages like: # https://cran.r-project.org/src/contrib/pls_2.6-0.tar.gz # https://cran.r-project.org/src/contrib/enpls_5.7.tar.gz # https://cran.r-project.org/src/contrib/autopls_1.3.tar.gz", "err # otherwise, just try to \"read\" from the URL, and assume that", "Any conflicting versions will be overwritten by the list_url links. versions = {}", "pages. \"\"\" pages = {} # dict from page URL -> text content.", "any links that match the wildcards. # Walk through archive_url links first. 
#", "changed = False new_list = [] for item in headers: if isinstance(item, (tuple,", "content_type = None is_web_url = url.scheme in ('http', 'https') if accept_content_type and is_web_url:", "set(('Date', 'date')), \"Last-modified\": set( ''.join((L, 'ast', sep, M, 'odified')) for L, sep, M", "with versions 2.7.9 and 3.4.3 of Python. if 'context' in kwargs: del kwargs['context']", "needs to be done in separate steps. shutil.copy2(local_file_path, remote_file_path) os.remove(local_file_path) elif remote_url.scheme ==", "from HTMLParser import HTMLParser, HTMLParseError except ImportError: # In Python 3, things moved", "of: - pages: dict of pages visited (URL) mapped to their full text.", "Bucket=url.netloc, Prefix=url.path, MaxKeys=num_entries) if start_after is not None: list_args['StartAfter'] = start_after result =", "links to up to max_depth. depth should initially be zero, and max_depth is", "# User wants SSL verification, but it cannot be provided. warn_no_ssl_cert_checking() else: #", "return opener(req, *args, **kwargs) def spider(root, depth=0): \"\"\"Gets web pages from a root", "of archives. Keyword Arguments: list_url (str or None): URL for a listing of", "web pages from a root URL. If depth is specified (e.g., depth=2), then", "0, depth, False) return pages, links def find_versions_of_archive(archive_urls, list_url=None, list_depth=0): \"\"\"Scrape web pages", "SSL verification, and it *can* be provided. context = ssl.create_default_context() else: # User", "spelling of \"Content-length\" would be substituted for any of the following names: -", "just look for the archive # part, not the full path. url_regex =", "versions of a tarball. Arguments: archive_urls (str or list or tuple): URL or", "scrape these pages for download links that look like the archive URL. list_depth", "response.headers.get('Content-type') reject_content_type = ( accept_content_type and ( content_type is None or not content_type.startswith(accept_content_type)))", "and there exists a standardized spelling for header[0] that differs from it, then", "wants SSL verification, and it *can* be provided. context = ssl.create_default_context() else: #", "directory if not abs_link.startswith(root): continue # Skip already-visited links if abs_link in visited:", "2.7.9 and 3.4.3 of Python. if 'context' in kwargs: del kwargs['context'] opener =", "root, depth + 1, max_depth, raise_on_error)) visited.add(abs_link) if subcalls: pool = NonDaemonPool(processes=len(subcalls)) try:", "(type(e), e), traceback.format_exc()) return pages, links def _spider_wrapper(args): \"\"\"Wrapper for using spider with", "subcalls: pool = NonDaemonPool(processes=len(subcalls)) try: results = pool.map(_spider_wrapper, subcalls) for sub_pages, sub_links in", "up similarly named packages like: # https://cran.r-project.org/src/contrib/pls_2.6-0.tar.gz # https://cran.r-project.org/src/contrib/enpls_5.7.tar.gz # https://cran.r-project.org/src/contrib/autopls_1.3.tar.gz # https://cran.r-project.org/src/contrib/matrixpls_1.0.4.tar.gz", "= spack.s3_handler.open return opener(req, *args, **kwargs) def spider(root, depth=0): \"\"\"Gets web pages from", "has explicitly indicated that they do not want SSL # verification. context =", "first letters of each word are capitalized; whether words are separated; or, if", "first. # Any conflicting versions will be overwritten by the list_url links. 
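# Illustrative sketch of the resulting shape (the URLs and versions here are
# hypothetical):
#   {Version('1.8.1'): 'https://example.com/foo-1.8.1.tar.gz',
#    Version('2.0.0'): 'https://example.com/foo-2.0.0.tar.gz'}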
versions", "that allows sub-processes, so pools can have sub-pools.\"\"\" @property def daemon(self): return False", "context=context) if accept_content_type and not is_web_url: content_type = response.headers.get('Content-type') reject_content_type = ( accept_content_type", "new tuple is returned. This tuple has the same elements as headers, except", "GET request when we know it's just HTML. req.get_method = lambda: \"GET\" response", "'Rr')), \"Content-length\": set( ''.join((C, 'ontent', sep, L, 'ength')) for C, sep, L in", "try: # Python 2 had these in the HTMLParser package. from HTMLParser import", "non-throwing response contains the resource represented by the URL try: read_from_url(url) return True", "as headers, except the first element is the standardized spelling for headers[0]. If", "otherwise, just try to \"read\" from the URL, and assume that *any* #", "= remote_url.path while remote_path.startswith('/'): remote_path = remote_path[1:] s3 = s3_util.create_s3_session(remote_url) s3.upload_file(local_file_path, remote_url.netloc, remote_path,", "if not isinstance(archive_urls, (list, tuple)): archive_urls = [archive_urls] # Generate a list of", "archive URL. list_depth (int): Max depth to follow links on list_url pages. Defaults", "hasattr(e, 'reason') and isinstance(e.reason, ssl.SSLError): tty.warn(\"Spack was unable to fetch url list due", "except ImportError: # In Python 3, things moved to html.parser from html.parser import", "errors with pages that the root links to. Returns a tuple of: -", "from botocore.exceptions import ClientError try: s3.get_object(Bucket=url.netloc, Key=url.path) return True except ClientError as err:", "'reason') and isinstance(e.reason, ssl.SSLError): tty.warn(\"Spack was unable to fetch url list due to", "= NonDaemonProcess else: class NonDaemonContext(type(multiprocessing.get_context())): Process = NonDaemonProcess class NonDaemonPool(multiprocessing.pool.Pool): \"\"\"Pool that uses", "in contents: yield x if not key: break def list_url(url): url = url_util.parse(url)", "'NoSuchKey': return False raise err # otherwise, just try to \"read\" from the", "in headers, or if the keys of multiple items in headers map to", "parsing. if sys.version_info[:3] < (2, 7, 3): msg += \" Use Python 2.7.3", "Scrape them for archive URLs regexes = [] for aurl in archive_urls: #", "other # Spack Project Developers. See the top-level COPYRIGHT file for details. #", "changed = changed or item is not old_item new_list.append(item) return new_list if changed", "Key=url.path) return # Don't even try for other URL schemes. def _list_s3_objects(client, url,", "response: return pages, links page = codecs.getreader('utf-8')(response).read() pages[response_url] = page # Parse out", "certs. if uses_ssl(url): if verify_ssl: if __UNABLE_TO_VERIFY_SSL: # User wants SSL verification, but", "content_length - contentlength - content_Length - contentLength - content Length ... and any", "regexes.append(url_regex) # Build a dict version -> URL from any links that match", "HTML. req.get_method = lambda: \"GET\" response = _urlopen(req, timeout=_timeout, context=context) if accept_content_type and", "return False __UNABLE_TO_VERIFY_SSL = ( lambda pyver: ( (pyver < (2, 7, 9))", "remote_path, **kwargs): keep_original = kwargs.get('keep_original', True) remote_url = url_util.parse(remote_path) verify_ssl = spack.config.get('config:verify_ssl') if", "print_function import codecs import errno import re import os import os.path import shutil", "returned unaltered. 
For example: The standard spelling of \"Content-length\" would be substituted for", "a non-empty tuple, headers[0] is a string, and there exists a standardized spelling", "of spelling. If headers is a string, then it (or an appropriate substitute)", "except OSError as e: if e.errno == errno.EXDEV: # NOTE(opadron): The above move", "initially be zero, and max_depth is the max depth of links to follow", "3, things moved to html.parser from html.parser import HTMLParser # Also, HTMLParseError is", "list_urls: pg, lnk = spider(lurl, depth=list_depth) pages.update(pg) links.update(lnk) # Scrape them for archive", "return versions def standardize_header_names(headers): \"\"\"Replace certain header names with standardized spellings. Standardizes the", "result['Contents'] ) if key != '.') return iter, last_key def _iter_s3_prefix(client, url, num_entries=1024):", "return headers class SpackWebError(spack.error.SpackError): \"\"\"Superclass for Spack web spidering errors.\"\"\" class NoNetworkConnectionError(SpackWebError): \"\"\"Raised", "a string or tuple. This new list is returned if at least one", "type. This lets # us ignore tarballs and gigantic files. # It would", "from six.moves.urllib.error import URLError import multiprocessing.pool try: # Python 2 had these in", "enable certificate verification.\") def push_to_url(local_file_path, remote_path, **kwargs): keep_original = kwargs.get('keep_original', True) remote_url =", "result['Contents'][-1]['Key'] iter = (key for key in ( os.path.relpath(entry['Key'], url.path) for entry in", "to the beginning of the regex to prevent # Spack from picking up", "root, depth, max_depth, raise_on_error): \"\"\"Fetches URL and any pages it links to up", "to extract things # on a page that look like archive URLs. url_regex", "= s3_util.create_s3_session(url) s3.delete_object(Bucket=url.s3_bucket, Key=url.path) return # Don't even try for other URL schemes.", "done in separate steps. shutil.copy2(local_file_path, remote_file_path) os.remove(local_file_path) elif remote_url.scheme == 's3': extra_args =", "ImportError: # In Python 3, things moved to html.parser from html.parser import HTMLParser", "of the regex to prevent # Spack from picking up similarly named packages", "visiting the pages. \"\"\" pages = {} # dict from page URL ->", "= False new_list = [] for item in headers: if isinstance(item, (tuple, six.string_types)):", "it crosses # filesystem boundaries. Copy the file (plus original # metadata), and", "to ignore the header # if you ask for a tarball with Accept:", "raise_on_error): \"\"\"Fetches URL and any pages it links to up to max_depth. depth", "# Don't even bother with a context unless the URL scheme is one", "so we can use this to extract things # on a page that", "'https') if accept_content_type and is_web_url: # Make a HEAD request first to check", "url_regex = spack.url.wildcard_version(aurl) # We'll be a bit more liberal and just look", "return standardized_spelling return headers if isinstance(headers, tuple): if not headers: return headers old", "However, most servers seem to ignore the header # if you ask for", "opener = urlopen if url_util.parse(url).scheme == 's3': import spack.s3_handler opener = spack.s3_handler.open return", "six.string_types)): old_key, key = key, standardize_header_names(key) changed = changed or key is not", "ignore tarballs and gigantic files. 
# It would be nice to do this", "botocore.exceptions import ClientError try: s3.get_object(Bucket=url.netloc, Key=url.path) return True except ClientError as err: if", "HTMLParseError as e: # This error indicates that Python's HTML parser sucks. msg", "so by a dash (-), underscore (_), or space ( ). Header names", "as url_util from spack.util.compression import ALLOWED_ARCHIVE_TYPES # Timeout in seconds for web requests", "3: class NonDaemonPool(multiprocessing.pool.Pool): \"\"\"Pool that uses non-daemon processes\"\"\" Process = NonDaemonProcess else: class", "return True return False __UNABLE_TO_VERIFY_SSL = ( lambda pyver: ( (pyver < (2,", "from the corresponding item. If the keys of multiple items in headers map", "standardized. In all other cases headers is returned unaltered. \"\"\" if isinstance(headers, six.string_types):", "message, url): super(NoNetworkConnectionError, self).__init__( \"No network connection: \" + str(message), \"URL was: \"", "'context' in kwargs: del kwargs['context'] opener = urlopen if url_util.parse(url).scheme == 's3': import", "\"GET\" response = _urlopen(req, timeout=_timeout, context=context) if accept_content_type and not is_web_url: content_type =", "# Other types of errors are completely ignored, except in debug mode. tty.debug(\"Error", "- content_Length - contentLength - content Length ... and any other header name,", "contentlength - content_Length - contentLength - content Length ... and any other header", "__init__(self, *args, **kwargs): kwargs['context'] = NonDaemonContext() super(NonDaemonPool, self).__init__(*args, **kwargs) def uses_ssl(parsed_url): if parsed_url.scheme", "_spider(*args) def _urlopen(req, *args, **kwargs): \"\"\"Wrapper for compatibility with old versions of Python.\"\"\"", "# on a page that look like archive URLs. url_regex = spack.url.wildcard_version(aurl) #", "= s3_util.create_s3_session(url) from botocore.exceptions import ClientError try: s3.get_object(Bucket=url.netloc, Key=url.path) return True except ClientError", "contents: yield x if not key: break def list_url(url): url = url_util.parse(url) local_path", "returned if at least one of its elements differ from their corrsponding element", "nice to do this with the HTTP Accept header to avoid # one", "things outside the root directory if not abs_link.startswith(root): continue # Skip already-visited links", "any pages it links to up to max_depth. depth should initially be zero,", "True except ClientError as err: if err.response['Error']['Code'] == 'NoSuchKey': return False raise err", "depth=2), then this will also follow up to <depth> levels of links from", "not be altered, regardless of spelling. If headers is a string, then it", "exists a standardized spelling for header[0] that differs from it, then a new", "headers: return headers old = headers[0] if isinstance(old, six.string_types): new = standardize_header_names(old) if", "HTTP_HEADER_NAME_ALIASES = { \"Accept-ranges\": set( ''.join((A, 'ccept', sep, R, 'anges')) for A, sep,", "based on archive urls and any # explicitly listed list_url in the package", "_urlopen(req, *args, **kwargs): \"\"\"Wrapper for compatibility with old versions of Python.\"\"\" url =", ".asc # .md5 # .sha256 # .sig # However, SourceForge downloads still need", "tty.debug(\"Error in _spider: %s:%s\" % (type(e), e), traceback.format_exc()) return pages, links def _spider_wrapper(args):", "mapped as described above are returned unaltered. 
For example: The standard spelling of", "{} for url in archive_urls + sorted(links): if any(re.search(r, url) for r in", "and other # Spack Project Developers. See the top-level COPYRIGHT file for details.", "AttributeError: pass # We don't pass 'context' parameter because it was only introduced", "and uses_ssl(remote_url): warn_no_ssl_cert_checking() remote_file_path = url_util.local_file_path(remote_url) if remote_file_path is not None: mkdirp(os.path.dirname(remote_file_path)) if", "a really simple spider. \"\"\" def __init__(self): HTMLParser.__init__(self) self.links = [] def handle_starttag(self,", "an appropriate substitute) is returned. If headers is a non-empty tuple, headers[0] is", "in kwargs: del kwargs['context'] opener = urlopen if url_util.parse(url).scheme == 's3': import spack.s3_handler", "archive_urls: list_urls |= spack.url.find_list_urls(aurl) # Add '/' to the end of the URL.", "Returns a tuple of: - pages: dict of pages visited (URL) mapped to", "+ '/') list_urls |= additional_list_urls # Grab some web pages to scrape. pages", "page link_parser = LinkParser() subcalls = [] link_parser.feed(page) while link_parser.links: raw_link = link_parser.links.pop()", "value is taken from the corresponding item. If the keys of multiple items", "it ignores errors with pages that the root links to. Returns a tuple", "we can use this to extract things # on a page that look", "the archive # part, not the full path. url_regex = os.path.basename(url_regex) # We", "s3 = s3_util.create_s3_session(url) from botocore.exceptions import ClientError try: s3.get_object(Bucket=url.netloc, Key=url.path) return True except", "tuple is returned. This tuple has the same elements as headers, except the", "by a dash (-), underscore (_), or space ( ). Header names that", "set( ''.join((C, 'ontent', sep, L, 'ength')) for C, sep, L in product('Cc', _separators,", "= headers[0] if isinstance(old, six.string_types): new = standardize_header_names(old) if old is not new:", "= s3_util.create_s3_session(url) return list(set( key.split('/', 1)[0] for key in _iter_s3_prefix(s3, url))) def _spider(url,", "HTMLParser, HTMLParseError except ImportError: # In Python 3, things moved to html.parser from", "for download links that look like the archive URL. list_depth (int): Max depth", "pages it links to up to max_depth. depth should initially be zero, and", "standardize_header_names(old) if old is not new: return (new,) + headers[1:] return headers try:", "an error parsing HTML.\" # Pre-2.7.3 Pythons in particular have rather prickly HTML", "@property def daemon(self): return False @daemon.setter def daemon(self, value): pass if sys.version_info[0] <", "(URL) mapped to their full text. - links: set of links encountered while", "= (key for key in ( os.path.relpath(entry['Key'], url.path) for entry in result['Contents'] )", "for headers[0]. If headers is a sequence, then a new list is considered,", "not isinstance(archive_urls, (list, tuple)): archive_urls = [archive_urls] # Generate a list of list_urls", "False new_dict = {} for key, value in headers.items(): if isinstance(key, (tuple, six.string_types)):", "set(), root, 0, depth, False) return pages, links def find_versions_of_archive(archive_urls, list_url=None, list_depth=0): \"\"\"Scrape", "Last-modified - Server Every name considered is translated to one of the above", "connection.\"\"\" def __init__(self, message, url): super(NoNetworkConnectionError, self).__init__( \"No network connection: \" + str(message),", "to end in '/download'. 
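# (Illustrative: the optional group added below lets one pattern match both a
# hypothetical 'foo-1.0.tar.gz' and its SourceForge-style
# 'foo-1.0.tar.gz/download' counterpart.)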
url_regex += r'(\\/download)?$' regexes.append(url_regex) # Build a dict version", "crosses # filesystem boundaries. Copy the file (plus original # metadata), and then", "sequence of URLs for different versions of a package. Typically these are just", "LinkParser(HTMLParser): \"\"\"This parser just takes an HTML page and strips out the hrefs", "of \"Content-length\" would be substituted for any of the following names: - Content-length", "set() for lurl in list_urls: pg, lnk = spider(lurl, depth=list_depth) pages.update(pg) links.update(lnk) #", "from html.parser import HTMLParser # Also, HTMLParseError is deprecated and never raised. class", "= NonDaemonPool(processes=len(subcalls)) try: results = pool.map(_spider_wrapper, subcalls) for sub_pages, sub_links in results: pages.update(sub_pages)", "< (3, 4, 3)) ))(sys.version_info) def read_from_url(url, accept_content_type=None): url = url_util.parse(url) context =", "def push_to_url(local_file_path, remote_path, **kwargs): keep_original = kwargs.get('keep_original', True) remote_url = url_util.parse(remote_path) verify_ssl =", "if not abs_link.startswith(root): continue # Skip already-visited links if abs_link in visited: continue", "headers[1:] return headers try: changed = False new_dict = {} for key, value", "above move failed because it crosses # filesystem boundaries. Copy the file (plus", "' ', '_', '-') HTTP_HEADER_NAME_ALIASES = { \"Accept-ranges\": set( ''.join((A, 'ccept', sep, R,", "is not None else \"\", content_type or \"\")) return None, None, None return", "SCHEME=remote_url.scheme)) def url_exists(url): url = url_util.parse(url) local_path = url_util.local_file_path(url) if local_path: return os.path.exists(local_path)", "entry in result['Contents'] ) if key != '.') return iter, last_key def _iter_s3_prefix(client,", "URL scheme is one that uses # SSL certs. if uses_ssl(url): if verify_ssl:", "return True except ClientError as err: if err.response['Error']['Code'] == 'NoSuchKey': return False raise", "lurl in list_urls: pg, lnk = spider(lurl, depth=list_depth) pages.update(pg) links.update(lnk) # Scrape them", "corresponding item. If the keys of multiple items in headers map to the", "super(NoNetworkConnectionError, self).__init__( \"No network connection: \" + str(message), \"URL was: \" + str(url))", "headers: if isinstance(item, (tuple, six.string_types)): old_item, item = item, standardize_header_names(item) changed = changed", "is_web_url: # Make a HEAD request first to check the content type. This", "as e: # This error indicates that Python's HTML parser sucks. msg =", "'Mm')), \"Server\": set(('Server', 'server')) } class LinkParser(HTMLParser): \"\"\"This parser just takes an HTML", "See docstring for standardize_header_names() _separators = ('', ' ', '_', '-') HTTP_HEADER_NAME_ALIASES =", "url_util.parse(root) pages, links = _spider(root, set(), root, 0, depth, False) return pages, links", "even bother with a context unless the URL scheme is one that uses", "dict version -> URL from any links that match the wildcards. # Walk", "parsing.\" tty.warn(msg, url, \"HTMLParseError: \" + str(e)) except Exception as e: # Other", "for url in archive_urls + sorted(links): if any(re.search(r, url) for r in regexes):", "will be overwritten by the list_url links. versions = {} for url in", "items in headers map to the same key after being standardized. In all", "of all links seen on visited pages. 
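# read_from_url() returns (final URL, headers, response) on success and
# (None, None, None) when the content type does not match the requested
# 'text/html', which is why both response_url and response are checked below.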
try: response_url, _, response = read_from_url(url,", "\" Use Python 2.7.3 or newer for better HTML parsing.\" tty.warn(msg, url, \"HTMLParseError:", "url_regex = os.path.basename(url_regex) # We need to add a / to the beginning", "for item in headers: if isinstance(item, (tuple, six.string_types)): old_item, item = item, standardize_header_names(item)", "to max_depth. depth should initially be zero, and max_depth is the max depth", "undefined. The new dict is returned if at least one of its items", "Exception as e: # Other types of errors are completely ignored, except in", "file (plus original # metadata), and then delete the original. This operation #", "'https': return True return False __UNABLE_TO_VERIFY_SSL = ( lambda pyver: ( (pyver <", "uses_ssl(url): if verify_ssl: if __UNABLE_TO_VERIFY_SSL: # User wants SSL verification, but it cannot", "root = url_util.parse(root) pages, links = _spider(root, set(), root, 0, depth, False) return", "URL with a capture group for # the version part of the URL.", "not headers: return headers old = headers[0] if isinstance(old, six.string_types): new = standardize_header_names(old)", "package file itself. By default, this searches the parent directories of archives. Keyword", "content_type is None or not content_type.startswith(accept_content_type))) if reject_content_type: tty.debug(\"ignoring page {0}{1}{2}\".format( url_util.format(url), \"", "list(set( key.split('/', 1)[0] for key in _iter_s3_prefix(s3, url))) def _spider(url, visited, root, depth,", "NonDaemonPool(multiprocessing.pool.Pool): \"\"\"Pool that uses non-daemon processes\"\"\" def __init__(self, *args, **kwargs): kwargs['context'] = NonDaemonContext()", "e: # Other types of errors are completely ignored, except in debug mode.", "in headers, mapped as above if a string or tuple. The value is", "Walk through archive_url links first. # Any conflicting versions will be overwritten by", "class NonDaemonPool(multiprocessing.pool.Pool): \"\"\"Pool that uses non-daemon processes\"\"\" Process = NonDaemonProcess else: class NonDaemonContext(type(multiprocessing.get_context())):", "( accept_content_type and ( content_type is None or not content_type.startswith(accept_content_type))) if reject_content_type: tty.debug(\"ignoring", "new_list = [] for item in headers: if isinstance(item, (tuple, six.string_types)): old_item, item", "# https://cran.r-project.org/src/contrib/matrixpls_1.0.4.tar.gz url_regex = '/' + url_regex # We need to add a", "if depth < max_depth: subcalls.append((abs_link, visited, root, depth + 1, max_depth, raise_on_error)) visited.add(abs_link)", "that uses non-daemon processes\"\"\" def __init__(self, *args, **kwargs): kwargs['context'] = NonDaemonContext() super(NonDaemonPool, self).__init__(*args,", "s3.delete_object(Bucket=url.s3_bucket, Key=url.path) return # Don't even try for other URL schemes. def _list_s3_objects(client,", "headers[0]. If headers is a sequence, then a new list is considered, where", "that differs from that of their corresponding item in headers, or if the", "if result['IsTruncated']: last_key = result['Contents'][-1]['Key'] iter = (key for key in ( os.path.relpath(entry['Key'],", "'Ll')), \"Content-type\": set( ''.join((C, 'ontent', sep, T, 'ype')) for C, sep, T in", "then a new list is considered, where each element is its corresponding element", "if local_path: return os.listdir(local_path) if url.scheme == 's3': s3 = s3_util.create_s3_session(url) return list(set(", "3.4.3 of Python. 
if 'context' in kwargs: del kwargs['context'] opener = urlopen if", "The above move failed because it crosses # filesystem boundaries. Copy the file", "Prints out a warning only if the root can't be fetched; it ignores", "Python 3, things moved to html.parser from html.parser import HTMLParser # Also, HTMLParseError", "(new,) + headers[1:] return headers try: changed = False new_dict = {} for", "key.split('/', 1)[0] for key in _iter_s3_prefix(s3, url))) def _spider(url, visited, root, depth, max_depth,", "- Content-length - content_length - contentlength - content_Length - contentLength - content Length", "set() # set of all links seen on visited pages. try: response_url, _,", "# Grab some web pages to scrape. pages = {} links = set()", "Good enough for a really simple spider. \"\"\" def __init__(self): HTMLParser.__init__(self) self.links =", "remote_file_path) else: try: os.rename(local_file_path, remote_file_path) except OSError as e: if e.errno == errno.EXDEV:", "whether words are separated; or, if separated, whether they are so by a", "break def list_url(url): url = url_util.parse(url) local_path = url_util.local_file_path(url) if local_path: return os.listdir(local_path)", "# We need to add a / to the beginning of the regex", "{} links = set() for lurl in list_urls: pg, lnk = spider(lurl, depth=list_depth)", "read_from_url(url) return True except URLError: return False def remove_url(url): url = url_util.parse(url) local_path", "[] def handle_starttag(self, tag, attrs): if tag == 'a': for attr, val in", "= ( accept_content_type and ( content_type is None or not content_type.startswith(accept_content_type))) if reject_content_type:", "(-), underscore (_), or space ( ). Header names that cannot be mapped", "# with versions 2.7.9 and 3.4.3 of Python. if 'context' in kwargs: del", "== 'https': return True return False __UNABLE_TO_VERIFY_SSL = ( lambda pyver: ( (pyver", "is its corresponding element in headers, but mapped as above if a string", "if list_url is not None: list_urls.add(list_url) for aurl in archive_urls: list_urls |= spack.url.find_list_urls(aurl)", "spack.s3_handler opener = spack.s3_handler.open return opener(req, *args, **kwargs) def spider(root, depth=0): \"\"\"Gets web", "accept_content_type and is_web_url: # Make a HEAD request first to check the content", "a list of list_urls based on archive urls and any # explicitly listed", "- Date - Last-modified - Server Every name considered is translated to one", "__init__(self, message, url): super(NoNetworkConnectionError, self).__init__( \"No network connection: \" + str(message), \"URL was:", "for suf in ALLOWED_ARCHIVE_TYPES): continue # Skip things outside the root directory if", "string, then it (or an appropriate substitute) is returned. If headers is a", "ignores errors with pages that the root links to. Returns a tuple of:", "2.7.3 or newer for better HTML parsing.\" tty.warn(msg, url, \"HTMLParseError: \" + str(e))", "with old versions of Python.\"\"\" url = req try: url = url.get_full_url() except", "s3_util.create_s3_session(url) s3.delete_object(Bucket=url.s3_bucket, Key=url.path) return # Don't even try for other URL schemes. 
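# --------------------------------------------------------------------------
# Hedged sketch (illustration only, not part of Spack's API): the URL helpers
# above share one dispatch pattern -- resolve the URL, handle plain
# filesystem paths with `os`, handle 's3' URLs with an object-store client,
# and treat every other scheme as unsupported.  The standalone function
# below shows that control flow using only the standard library; the name
# `example_remove` is hypothetical.  Unlike remove_url() above, which
# silently ignores unknown schemes, this sketch raises (as push_to_url()
# does) to make the unsupported case explicit.
def example_remove(url):
    import os
    from urllib.parse import urlparse

    parsed = urlparse(url)
    if parsed.scheme in ('', 'file'):
        # Local path: delete directly from the filesystem.
        os.remove(parsed.path)
    elif parsed.scheme == 's3':
        # Object store: a real implementation would delete the key through
        # an S3 client session here, as remove_url() does above.
        raise NotImplementedError('S3 client call goes here')
    else:
        raise NotImplementedError('Unrecognized URL scheme: ' + parsed.scheme)
# --------------------------------------------------------------------------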
def", "return headers return standardized_spelling return headers if isinstance(headers, tuple): if not headers: return", "in the page link_parser = LinkParser() subcalls = [] link_parser.feed(page) while link_parser.links: raw_link", "mkdirp(os.path.dirname(remote_file_path)) if keep_original: shutil.copy(local_file_path, remote_file_path) else: try: os.rename(local_file_path, remote_file_path) except OSError as e:", "r'(\\/download)?$' regexes.append(url_regex) # Build a dict version -> URL from any links that", "spellings. Standardizes the spellings of the following header names: - Accept-ranges - Content-length", "anchor to the end of the regex to prevent # Spack from picking", "be done in separate steps. shutil.copy2(local_file_path, remote_file_path) os.remove(local_file_path) elif remote_url.scheme == 's3': extra_args", "to follow from the root. Prints out a warning only if the root", "searches the parent directories of archives. Keyword Arguments: list_url (str or None): URL", "s3_util.create_s3_session(url) return list(set( key.split('/', 1)[0] for key in _iter_s3_prefix(s3, url))) def _spider(url, visited,", "= urlopen if url_util.parse(url).scheme == 's3': import spack.s3_handler opener = spack.s3_handler.open return opener(req,", "True if parsed_url.scheme == 's3': endpoint_url = os.environ.get('S3_ENDPOINT_URL') if not endpoint_url: return True", "Python 2 had these in the HTMLParser package. from HTMLParser import HTMLParser, HTMLParseError", "+ url_regex # We need to add a $ anchor to the end", "a bit more liberal and just look for the archive # part, not", "If headers is a string, then it (or an appropriate substitute) is returned.", "verify_ssl = spack.config.get('config:verify_ssl') # Don't even bother with a context unless the URL", "a dict version -> URL from any links that match the wildcards. #", "result['IsTruncated']: last_key = result['Contents'][-1]['Key'] iter = (key for key in ( os.path.relpath(entry['Key'], url.path)", "a sequence, then a new list is considered, where each element is its", "had these in the HTMLParser package. from HTMLParser import HTMLParser, HTMLParseError except ImportError:", "will not check SSL certificates. Use this at your \" \"own risk.\") if", "Spack from picking up similarly named packages like: # https://cran.r-project.org/src/contrib/pls_2.6-0.tar.gz # https://cran.r-project.org/src/contrib/enpls_5.7.tar.gz #", "tag, attrs): if tag == 'a': for attr, val in attrs: if attr", "links from the root. This will spawn processes to fetch the children, for", "for attr, val in attrs: if attr == 'href': self.links.append(val) class NonDaemonProcess(multiprocessing.Process): \"\"\"Process", "import print_function import codecs import errno import re import os import os.path import", "We don't pass 'context' parameter because it was only introduced starting # with", "headers map to the same key after being standardized, then the value for", "text. - links: set of links encountered while visiting the pages. \"\"\" pages", "be fetched; it ignores errors with pages that the root links to. 
Returns", "return False def remove_url(url): url = url_util.parse(url) local_path = url_util.local_file_path(url) if local_path: os.remove(local_path)", "liberal and just look for the archive # part, not the full path.", "local_path = url_util.local_file_path(url) if local_path: return os.listdir(local_path) if url.scheme == 's3': s3 =", "for key, value in headers.items(): if isinstance(key, (tuple, six.string_types)): old_key, key = key,", "product('Cc', _separators, 'Ll')), \"Content-type\": set( ''.join((C, 'ontent', sep, T, 'ype')) for C, sep,", "seconds for web requests _timeout = 10 # See docstring for standardize_header_names() _separators", "use this to extract things # on a page that look like archive", "list_url links. versions = {} for url in archive_urls + sorted(links): if any(re.search(r,", "verification.\") def push_to_url(local_file_path, remote_path, **kwargs): keep_original = kwargs.get('keep_original', True) remote_url = url_util.parse(remote_path) verify_ssl", "url) for r in regexes): try: ver = spack.url.parse_version(url) versions[ver] = url except", "changed else headers except TypeError: pass return headers class SpackWebError(spack.error.SpackError): \"\"\"Superclass for Spack", "HTMLParser package. from HTMLParser import HTMLParser, HTMLParseError except ImportError: # In Python 3,", "with Accept: text/html. req.get_method = lambda: \"HEAD\" resp = _urlopen(req, timeout=_timeout, context=context) content_type", "M in product('Ll', _separators, 'Mm')), \"Server\": set(('Server', 'server')) } class LinkParser(HTMLParser): \"\"\"This parser", "generic wildcard, so we can use this to extract things # on a", "certificate verification.\") def push_to_url(local_file_path, remote_path, **kwargs): keep_original = kwargs.get('keep_original', True) remote_url = url_util.parse(remote_path)", "the list_url links. versions = {} for url in archive_urls + sorted(links): if", "headers except (AttributeError, TypeError, ValueError): pass try: changed = False new_list = []", "header to avoid # one round-trip. However, most servers seem to ignore the", "last_key = result['Contents'][-1]['Key'] iter = (key for key in ( os.path.relpath(entry['Key'], url.path) for", "archives. Keyword Arguments: list_url (str or None): URL for a listing of archives.", "# https://cran.r-project.org/src/contrib/enpls_5.7.tar.gz # https://cran.r-project.org/src/contrib/autopls_1.3.tar.gz # https://cran.r-project.org/src/contrib/matrixpls_1.0.4.tar.gz url_regex = '/' + url_regex # We", "from spack.util.compression import ALLOWED_ARCHIVE_TYPES # Timeout in seconds for web requests _timeout =", "download links that look like the archive URL. list_depth (int): Max depth to", "e: if e.errno == errno.EXDEV: # NOTE(opadron): The above move failed because it", "return headers try: changed = False new_dict = {} for key, value in", "However, SourceForge downloads still need to end in '/download'. url_regex += r'(\\/download)?$' regexes.append(url_regex)", "response_url or not response: return pages, links page = codecs.getreader('utf-8')(response).read() pages[response_url] = page", "s3 = s3_util.create_s3_session(url) s3.delete_object(Bucket=url.s3_bucket, Key=url.path) return # Don't even try for other URL", "Don't even bother with a context unless the URL scheme is one that", "when an operation can't get an internet connection.\"\"\" def __init__(self, message, url): super(NoNetworkConnectionError,", "as above if a string or tuple. The value is taken from the", "depth=0): \"\"\"Gets web pages from a root URL. 
If depth is specified (e.g.,", "(3, 4, 3)) ))(sys.version_info) def read_from_url(url, accept_content_type=None): url = url_util.parse(url) context = None", "just takes an HTML page and strips out the hrefs on the links.", "are capitalized; whether words are separated; or, if separated, whether they are so", "Copyright 2013-2019 Lawrence Livermore National Security, LLC and other # Spack Project Developers.", "or None): URL for a listing of archives. Spack will scrape these pages", "= start_after result = client.list_objects_v2(**list_args) last_key = None if result['IsTruncated']: last_key = result['Contents'][-1]['Key']", "if not headers: return headers old = headers[0] if isinstance(old, six.string_types): new =", "\"own risk.\") if raise_on_error: raise NoNetworkConnectionError(str(e), url) except HTMLParseError as e: # This", "== 's3': extra_args = kwargs.get('extra_args', {}) remote_path = remote_url.path while remote_path.startswith('/'): remote_path =", "Build a dict version -> URL from any links that match the wildcards.", "prevent # Spack from picking up signature files like: # .asc # .md5", "other cases headers is returned unaltered. \"\"\" if isinstance(headers, six.string_types): for standardized_spelling, other_spellings", "headers in other_spellings: if headers == standardized_spelling: return headers return standardized_spelling return headers", "to add a $ anchor to the end of the regex to prevent", "separate steps. shutil.copy2(local_file_path, remote_file_path) os.remove(local_file_path) elif remote_url.scheme == 's3': extra_args = kwargs.get('extra_args', {})", "remote_url.path while remote_path.startswith('/'): remote_path = remote_path[1:] s3 = s3_util.create_s3_session(remote_url) s3.upload_file(local_file_path, remote_url.netloc, remote_path, ExtraArgs=extra_args)", "os import os.path import shutil import ssl import sys import traceback from itertools", "return iter, last_key def _iter_s3_prefix(client, url, num_entries=1024): key = None while True: contents,", "converted # to a generic wildcard, so we can use this to extract", "url = url_util.parse(url) local_path = url_util.local_file_path(url) if local_path: return os.listdir(local_path) if url.scheme ==", "(int): Max depth to follow links on list_url pages. Defaults to 0. \"\"\"", "< max_depth: subcalls.append((abs_link, visited, root, depth + 1, max_depth, raise_on_error)) visited.add(abs_link) if subcalls:", "== 'NoSuchKey': return False raise err # otherwise, just try to \"read\" from", "a context unless the URL scheme is one that uses # SSL certs.", "# to a generic wildcard, so we can use this to extract things", "in results: pages.update(sub_pages) links.update(sub_links) finally: pool.terminate() pool.join() except URLError as e: tty.debug(e) if", "be mapped as described above are returned unaltered. For example: The standard spelling", "else: raise NotImplementedError( 'Unrecognized URL scheme: {SCHEME}'.format( SCHEME=remote_url.scheme)) def url_exists(url): url = url_util.parse(url)", "key != '.') return iter, last_key def _iter_s3_prefix(client, url, num_entries=1024): key = None", "= os.path.basename(url_regex) # We need to add a / to the beginning of", "e: tty.debug(e) if hasattr(e, 'reason') and isinstance(e.reason, ssl.SSLError): tty.warn(\"Spack was unable to fetch", "this. additional_list_urls = set() for lurl in list_urls: if not lurl.endswith('/'): additional_list_urls.add(lurl +", "versions will be overwritten by the list_url links. versions = {} for url", "item. 
If the keys of multiple items in headers map to the same", "'a': for attr, val in attrs: if attr == 'href': self.links.append(val) class NonDaemonProcess(multiprocessing.Process):", "mapped to their full text. - links: set of links encountered while visiting", "like an archive if any(raw_link.endswith(suf) for suf in ALLOWED_ARCHIVE_TYPES): continue # Skip things", "content_Length - contentLength - content Length ... and any other header name, such", "{} # dict from page URL -> text content. links = set() #", "an HTML page and strips out the hrefs on the links. Good enough", "key is not old_key new_dict[key] = value return new_dict if changed else headers", "for different versions of a package. Typically these are just the tarballs from", "= None if result['IsTruncated']: last_key = result['Contents'][-1]['Key'] iter = (key for key in", "fetch the children, for much improved performance over a sequential fetch. \"\"\" root", "is returned. If headers is a non-empty tuple, headers[0] is a string, and", "in headers.items(): if isinstance(key, (tuple, six.string_types)): old_key, key = key, standardize_header_names(key) changed =", "None while True: contents, key = _list_s3_objects( client, url, num_entries, start_after=key) for x", "to html.parser from html.parser import HTMLParser # Also, HTMLParseError is deprecated and never", "(tuple, six.string_types)): old_item, item = item, standardize_header_names(item) changed = changed or item is", "HTMLParseError is deprecated and never raised. class HTMLParseError(Exception): pass from llnl.util.filesystem import mkdirp", "attr, val in attrs: if attr == 'href': self.links.append(val) class NonDaemonProcess(multiprocessing.Process): \"\"\"Process that", "\"\"\"Pool that uses non-daemon processes\"\"\" Process = NonDaemonProcess else: class NonDaemonContext(type(multiprocessing.get_context())): Process =", "# Generate a list of list_urls based on archive urls and any #", "'odified')) for L, sep, M in product('Ll', _separators, 'Mm')), \"Server\": set(('Server', 'server')) }", "error parsing HTML.\" # Pre-2.7.3 Pythons in particular have rather prickly HTML parsing.", "if old is not new: return (new,) + headers[1:] return headers try: changed", "page = codecs.getreader('utf-8')(response).read() pages[response_url] = page # Parse out the links in the", "or tuple. This new list is returned if at least one of its", "some web pages to scrape. pages = {} links = set() for lurl", "links. versions = {} for url in archive_urls + sorted(links): if any(re.search(r, url)", "then a new tuple is returned. This tuple has the same elements as", "https://cran.r-project.org/src/contrib/matrixpls_1.0.4.tar.gz url_regex = '/' + url_regex # We need to add a $", "item is the key of its corresponding item in headers, mapped as above", "up to <depth> levels of links from the root. This will spawn processes", "_separators, 'Mm')), \"Server\": set(('Server', 'server')) } class LinkParser(HTMLParser): \"\"\"This parser just takes an", "problem. You can try running spack -k, \" \"which will not check SSL", "NonDaemonProcess class NonDaemonPool(multiprocessing.pool.Pool): \"\"\"Pool that uses non-daemon processes\"\"\" def __init__(self, *args, **kwargs): kwargs['context']", "# However, SourceForge downloads still need to end in '/download'. url_regex += r'(\\/download)?$'", "these in the HTMLParser package. 
from HTMLParser import HTMLParser, HTMLParseError except ImportError: #", "files like: # .asc # .md5 # .sha256 # .sig # However, SourceForge", "in product('Ll', _separators, 'Mm')), \"Server\": set(('Server', 'server')) } class LinkParser(HTMLParser): \"\"\"This parser just", "not keep_original: os.remove(local_file_path) else: raise NotImplementedError( 'Unrecognized URL scheme: {SCHEME}'.format( SCHEME=remote_url.scheme)) def url_exists(url):", "re import os import os.path import shutil import ssl import sys import traceback", "filesystem boundaries. Copy the file (plus original # metadata), and then delete the", "'s3': import spack.s3_handler opener = spack.s3_handler.open return opener(req, *args, **kwargs) def spider(root, depth=0):", "the spellings of the following header names: - Accept-ranges - Content-length - Content-type", "if any(raw_link.endswith(suf) for suf in ALLOWED_ARCHIVE_TYPES): continue # Skip things outside the root", "if start_after is not None: list_args['StartAfter'] = start_after result = client.list_objects_v2(**list_args) last_key =", "key = key, standardize_header_names(key) changed = changed or key is not old_key new_dict[key]", "response.headers, response def warn_no_ssl_cert_checking(): tty.warn(\"Spack will not check SSL certificates. You need to", "improved performance over a sequential fetch. \"\"\" root = url_util.parse(root) pages, links =", "for C, sep, L in product('Cc', _separators, 'Ll')), \"Content-type\": set( ''.join((C, 'ontent', sep,", "= NonDaemonProcess class NonDaemonPool(multiprocessing.pool.Pool): \"\"\"Pool that uses non-daemon processes\"\"\" def __init__(self, *args, **kwargs):", "%s:%s\" % (type(e), e), traceback.format_exc()) return pages, links def _spider_wrapper(args): \"\"\"Wrapper for using", "to update \" \"your Python to enable certificate verification.\") def push_to_url(local_file_path, remote_path, **kwargs):", "'Tt')), \"Date\": set(('Date', 'date')), \"Last-modified\": set( ''.join((L, 'ast', sep, M, 'odified')) for L,", "Prefix=url.path, MaxKeys=num_entries) if start_after is not None: list_args['StartAfter'] = start_after result = client.list_objects_v2(**list_args)", "url) except HTMLParseError as e: # This error indicates that Python's HTML parser", "pages[response_url] = page # Parse out the links in the page link_parser =", "reject_content_type = ( accept_content_type and ( content_type is None or not content_type.startswith(accept_content_type))) if", "is returned unaltered. \"\"\" if isinstance(headers, six.string_types): for standardized_spelling, other_spellings in ( HTTP_HEADER_NAME_ALIASES.items()):", "for x in contents: yield x if not key: break def list_url(url): url", "- Accept-ranges - Content-length - Content-type - Date - Last-modified - Server Every", "the root links to. Returns a tuple of: - pages: dict of pages", "url_util.parse(url).scheme == 's3': import spack.s3_handler opener = spack.s3_handler.open return opener(req, *args, **kwargs) def", "remote_file_path) os.remove(local_file_path) elif remote_url.scheme == 's3': extra_args = kwargs.get('extra_args', {}) remote_path = remote_url.path", "with a capture group for # the version part of the URL. The", "accept_content_type=None): url = url_util.parse(url) context = None verify_ssl = spack.config.get('config:verify_ssl') # Don't even", "os.environ.get('S3_ENDPOINT_URL') if not endpoint_url: return True if url_util.parse(endpoint_url, scheme='https').scheme == 'https': return True", "taken from the corresponding item. 
If the keys of multiple items in headers", "response = _urlopen(req, timeout=_timeout, context=context) if accept_content_type and not is_web_url: content_type = response.headers.get('Content-type')", "if __UNABLE_TO_VERIFY_SSL: # User wants SSL verification, but it cannot be provided. warn_no_ssl_cert_checking()", "err: if err.response['Error']['Code'] == 'NoSuchKey': return False raise err # otherwise, just try", "lurl in list_urls: if not lurl.endswith('/'): additional_list_urls.add(lurl + '/') list_urls |= additional_list_urls #", "its elements differ from their corrsponding element in headers. If headers is a", "context = ssl.create_default_context() else: # User has explicitly indicated that they do not", "dash (-), underscore (_), or space ( ). Header names that cannot be", "Standardizes the spellings of the following header names: - Accept-ranges - Content-length -", "can try running spack -k, \" \"which will not check SSL certificates. Use", "element is its corresponding element in headers, but mapped as above if a", "out the links in the page link_parser = LinkParser() subcalls = [] link_parser.feed(page)", "in separate steps. shutil.copy2(local_file_path, remote_file_path) os.remove(local_file_path) elif remote_url.scheme == 's3': extra_args = kwargs.get('extra_args',", "SpackWebError(spack.error.SpackError): \"\"\"Superclass for Spack web spidering errors.\"\"\" class NoNetworkConnectionError(SpackWebError): \"\"\"Raised when an operation", "This error indicates that Python's HTML parser sucks. msg = \"Got an error", "Every name considered is translated to one of the above names if the", "HTML parsing. if sys.version_info[:3] < (2, 7, 3): msg += \" Use Python", "look like the archive URL. list_depth (int): Max depth to follow links on", "False __UNABLE_TO_VERIFY_SSL = ( lambda pyver: ( (pyver < (2, 7, 9)) or", "update \" \"your Python to enable certificate verification.\") def push_to_url(local_file_path, remote_path, **kwargs): keep_original", "1, max_depth, raise_on_error)) visited.add(abs_link) if subcalls: pool = NonDaemonPool(processes=len(subcalls)) try: results = pool.map(_spider_wrapper,", "of Python.\"\"\" url = req try: url = url.get_full_url() except AttributeError: pass #", "returned if at least one of its items has a key that differs", "completely ignored, except in debug mode. tty.debug(\"Error in _spider: %s:%s\" % (type(e), e),", "pass 'context' parameter because it was only introduced starting # with versions 2.7.9", "avoid # one round-trip. However, most servers seem to ignore the header #", "versions = {} for url in archive_urls + sorted(links): if any(re.search(r, url) for", "standardize_header_names(item) changed = changed or item is not old_item new_list.append(item) return new_list if", "This will spawn processes to fetch the children, for much improved performance over", "= [archive_urls] # Generate a list of list_urls based on archive urls and", "capture group for # the version part of the URL. The capture group", "full text. - links: set of links encountered while visiting the pages. 
\"\"\"", "in ( HTTP_HEADER_NAME_ALIASES.items()): if headers in other_spellings: if headers == standardized_spelling: return headers", "= ( lambda pyver: ( (pyver < (2, 7, 9)) or ((3,) <", "one of its items has a key that differs from that of their", "better HTML parsing.\" tty.warn(msg, url, \"HTMLParseError: \" + str(e)) except Exception as e:", "is not None: mkdirp(os.path.dirname(remote_file_path)) if keep_original: shutil.copy(local_file_path, remote_file_path) else: try: os.rename(local_file_path, remote_file_path) except", "os.path.basename(url_regex) # We need to add a / to the beginning of the", "# User wants SSL verification, and it *can* be provided. context = ssl.create_default_context()", "the parent directories of archives. Keyword Arguments: list_url (str or None): URL for", "least one of its items has a key that differs from that of", "and assume that *any* # non-throwing response contains the resource represented by the", "get an internet connection.\"\"\" def __init__(self, message, url): super(NoNetworkConnectionError, self).__init__( \"No network connection:", "headers, but mapped as above if a string or tuple. This new list", "def _iter_s3_prefix(client, url, num_entries=1024): key = None while True: contents, key = _list_s3_objects(", "if remote_file_path is not None: mkdirp(os.path.dirname(remote_file_path)) if keep_original: shutil.copy(local_file_path, remote_file_path) else: try: os.rename(local_file_path,", "when we know it's just HTML. req.get_method = lambda: \"GET\" response = _urlopen(req,", "to fetch url list due to a certificate \" \"verification problem. You can", "return pages, links def _spider_wrapper(args): \"\"\"Wrapper for using spider with multiprocessing.\"\"\" return _spider(*args)", "e.errno == errno.EXDEV: # NOTE(opadron): The above move failed because it crosses #", "= ('', ' ', '_', '-') HTTP_HEADER_NAME_ALIASES = { \"Accept-ranges\": set( ''.join((A, 'ccept',", "ClientError as err: if err.response['Error']['Code'] == 'NoSuchKey': return False raise err # otherwise,", "spack.url.find_list_urls(aurl) # Add '/' to the end of the URL. Some web servers", "the resource represented by the URL try: read_from_url(url) return True except URLError: return", "context = None verify_ssl = spack.config.get('config:verify_ssl') # Don't even bother with a context", "in result['Contents'] ) if key != '.') return iter, last_key def _iter_s3_prefix(client, url,", "a string, and there exists a standardized spelling for header[0] that differs from", "last_key = None if result['IsTruncated']: last_key = result['Contents'][-1]['Key'] iter = (key for key", "non-empty tuple, headers[0] is a string, and there exists a standardized spelling for", "Timeout in seconds for web requests _timeout = 10 # See docstring for", "links to. Returns a tuple of: - pages: dict of pages visited (URL)", "verification, but it cannot be provided. warn_no_ssl_cert_checking() else: # User wants SSL verification,", "list_urls based on archive urls and any # explicitly listed list_url in the", "'/' + url_regex # We need to add a $ anchor to the", "None verify_ssl = spack.config.get('config:verify_ssl') # Don't even bother with a context unless the", "import re import os import os.path import shutil import ssl import sys import", "the two is how the first letters of each word are capitalized; whether", "for web requests _timeout = 10 # See docstring for standardize_header_names() _separators =", "# Don't even try for other URL schemes. 
def _list_s3_objects(client, url, num_entries, start_after=None):", "in ( os.path.relpath(entry['Key'], url.path) for entry in result['Contents'] ) if key != '.')", "url): super(NoNetworkConnectionError, self).__init__( \"No network connection: \" + str(message), \"URL was: \" +", "num_entries, start_after=key) for x in contents: yield x if not key: break def", "R in product('Aa', _separators, 'Rr')), \"Content-length\": set( ''.join((C, 'ontent', sep, L, 'ength')) for", "items in headers map to the same key after being standardized, then the", "# SSL certs. if uses_ssl(url): if verify_ssl: if __UNABLE_TO_VERIFY_SSL: # User wants SSL", "or space ( ). Header names that cannot be mapped as described above", "else: try: os.rename(local_file_path, remote_file_path) except OSError as e: if e.errno == errno.EXDEV: #", "with multiprocessing.\"\"\" return _spider(*args) def _urlopen(req, *args, **kwargs): \"\"\"Wrapper for compatibility with old", "but it cannot be provided. warn_no_ssl_cert_checking() else: # User wants SSL verification, and", "introduced starting # with versions 2.7.9 and 3.4.3 of Python. if 'context' in", "versions def standardize_header_names(headers): \"\"\"Replace certain header names with standardized spellings. Standardizes the spellings", "add a $ anchor to the end of the regex to prevent #", "accept_content_type and ( content_type is None or not content_type.startswith(accept_content_type))) if reject_content_type: tty.debug(\"ignoring page", "\"\"\"Pool that uses non-daemon processes\"\"\" def __init__(self, *args, **kwargs): kwargs['context'] = NonDaemonContext() super(NonDaemonPool,", "self).__init__(*args, **kwargs) def uses_ssl(parsed_url): if parsed_url.scheme == 'https': return True if parsed_url.scheme ==", "__init__(self): HTMLParser.__init__(self) self.links = [] def handle_starttag(self, tag, attrs): if tag == 'a':", "__UNABLE_TO_VERIFY_SSL and verify_ssl and uses_ssl(remote_url): warn_no_ssl_cert_checking() remote_file_path = url_util.local_file_path(remote_url) if remote_file_path is not", "os.path.exists(local_path) if url.scheme == 's3': s3 = s3_util.create_s3_session(url) from botocore.exceptions import ClientError try:", "the URL try: read_from_url(url) return True except URLError: return False def remove_url(url): url", "+ str(e)) except Exception as e: # Other types of errors are completely", "(2, 7, 9)) or ((3,) < pyver < (3, 4, 3)) ))(sys.version_info) def", "for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) from __future__ import print_function import", "- pages: dict of pages visited (URL) mapped to their full text. -", "pass from llnl.util.filesystem import mkdirp import llnl.util.tty as tty import spack.cmd import spack.config", "HTTP Accept header to avoid # one round-trip. However, most servers seem to", "be provided. warn_no_ssl_cert_checking() else: # User wants SSL verification, and it *can* be", "of archives. Spack will scrape these pages for download links that look like", "= [] for aurl in archive_urls: # This creates a regex from the", "mode. tty.debug(\"Error in _spider: %s:%s\" % (type(e), e), traceback.format_exc()) return pages, links def", "< (2, 7, 9)) or ((3,) < pyver < (3, 4, 3)) ))(sys.version_info)", "URLs for different versions of a package. 
Typically these are just the tarballs", "False) return pages, links def find_versions_of_archive(archive_urls, list_url=None, list_depth=0): \"\"\"Scrape web pages for new", "def _urlopen(req, *args, **kwargs): \"\"\"Wrapper for compatibility with old versions of Python.\"\"\" url", "except in debug mode. tty.debug(\"Error in _spider: %s:%s\" % (type(e), e), traceback.format_exc()) return", "< 3: class NonDaemonPool(multiprocessing.pool.Pool): \"\"\"Pool that uses non-daemon processes\"\"\" Process = NonDaemonProcess else:", "original. This operation # needs to be done in separate steps. shutil.copy2(local_file_path, remote_file_path)", "SPDX-License-Identifier: (Apache-2.0 OR MIT) from __future__ import print_function import codecs import errno import", "def __init__(self): HTMLParser.__init__(self) self.links = [] def handle_starttag(self, tag, attrs): if tag ==", "= url_util.local_file_path(url) if local_path: return os.path.exists(local_path) if url.scheme == 's3': s3 = s3_util.create_s3_session(url)", "root directory if not abs_link.startswith(root): continue # Skip already-visited links if abs_link in", "if url_util.parse(url).scheme == 's3': import spack.s3_handler opener = spack.s3_handler.open return opener(req, *args, **kwargs)", "local_path: return os.path.exists(local_path) if url.scheme == 's3': s3 = s3_util.create_s3_session(url) from botocore.exceptions import", "links.add(abs_link) # Skip stuff that looks like an archive if any(raw_link.endswith(suf) for suf", "# Make a HEAD request first to check the content type. This lets", "s3 = s3_util.create_s3_session(url) return list(set( key.split('/', 1)[0] for key in _iter_s3_prefix(s3, url))) def", "# us ignore tarballs and gigantic files. # It would be nice to", "= url.get_full_url() except AttributeError: pass # We don't pass 'context' parameter because it", "look for the archive # part, not the full path. url_regex = os.path.basename(url_regex)", "remote_url = url_util.parse(remote_path) verify_ssl = spack.config.get('config:verify_ssl') if __UNABLE_TO_VERIFY_SSL and verify_ssl and uses_ssl(remote_url): warn_no_ssl_cert_checking()", "headers is a non-empty tuple, headers[0] is a string, and there exists a", "of the following header names: - Accept-ranges - Content-length - Content-type - Date", "differs from that of their corresponding item in headers, or if the keys", "= { \"Accept-ranges\": set( ''.join((A, 'ccept', sep, R, 'anges')) for A, sep, R", "is one that uses # SSL certs. if uses_ssl(url): if verify_ssl: if __UNABLE_TO_VERIFY_SSL:", "try: results = pool.map(_spider_wrapper, subcalls) for sub_pages, sub_links in results: pages.update(sub_pages) links.update(sub_links) finally:", "kwargs.get('extra_args', {}) remote_path = remote_url.path while remote_path.startswith('/'): remote_path = remote_path[1:] s3 = s3_util.create_s3_session(remote_url)", "spelling for headers[0]. If headers is a sequence, then a new list is", "( ). Header names that cannot be mapped as described above are returned", "else: # User has explicitly indicated that they do not want SSL #", "_spider: %s:%s\" % (type(e), e), traceback.format_exc()) return pages, links def _spider_wrapper(args): \"\"\"Wrapper for", "returned unaltered. 
\"\"\" if isinstance(headers, six.string_types): for standardized_spelling, other_spellings in ( HTTP_HEADER_NAME_ALIASES.items()): if", "uses_ssl(remote_url): warn_no_ssl_cert_checking() remote_file_path = url_util.local_file_path(remote_url) if remote_file_path is not None: mkdirp(os.path.dirname(remote_file_path)) if keep_original:", "\"Date\": set(('Date', 'date')), \"Last-modified\": set( ''.join((L, 'ast', sep, M, 'odified')) for L, sep,", "link_parser.feed(page) while link_parser.links: raw_link = link_parser.links.pop() abs_link = url_util.join( response_url, raw_link.strip(), resolve_href=True) links.add(abs_link)", "spack.config.get('config:verify_ssl') if __UNABLE_TO_VERIFY_SSL and verify_ssl and uses_ssl(remote_url): warn_no_ssl_cert_checking() remote_file_path = url_util.local_file_path(remote_url) if remote_file_path", "'date')), \"Last-modified\": set( ''.join((L, 'ast', sep, M, 'odified')) for L, sep, M in", "kwargs['context'] = NonDaemonContext() super(NonDaemonPool, self).__init__(*args, **kwargs) def uses_ssl(parsed_url): if parsed_url.scheme == 'https': return", "be overwritten by the list_url links. versions = {} for url in archive_urls", "MaxKeys=num_entries) if start_after is not None: list_args['StartAfter'] = start_after result = client.list_objects_v2(**list_args) last_key", "tty.warn(msg, url, \"HTMLParseError: \" + str(e)) except Exception as e: # Other types", "much improved performance over a sequential fetch. \"\"\" root = url_util.parse(root) pages, links", "the links. Good enough for a really simple spider. \"\"\" def __init__(self): HTMLParser.__init__(self)", "compatibility with old versions of Python.\"\"\" url = req try: url = url.get_full_url()", "corresponding item in headers, mapped as above if a string or tuple. The", "of multiple items in headers map to the same key after being standardized,", "url.scheme == 's3': s3 = s3_util.create_s3_session(url) s3.delete_object(Bucket=url.s3_bucket, Key=url.path) return # Don't even try", "not response_url or not response: return pages, links page = codecs.getreader('utf-8')(response).read() pages[response_url] =", "$ anchor to the end of the regex to prevent # Spack from", "depth of links to follow from the root. Prints out a warning only", "key: break def list_url(url): url = url_util.parse(url) local_path = url_util.local_file_path(url) if local_path: return", "ssl.SSLError): tty.warn(\"Spack was unable to fetch url list due to a certificate \"", "is a non-empty tuple, headers[0] is a string, and there exists a standardized", "a standardized spelling for header[0] that differs from it, then a new tuple", "links if abs_link in visited: continue # If we're not at max depth,", "we know it's just HTML. req.get_method = lambda: \"GET\" response = _urlopen(req, timeout=_timeout,", "headers, mapped as above if a string or tuple. The value is taken", "**kwargs): \"\"\"Wrapper for compatibility with old versions of Python.\"\"\" url = req try:", "the key in each item is the key of its corresponding item in", "In Python 3, things moved to html.parser from html.parser import HTMLParser # Also,", "def uses_ssl(parsed_url): if parsed_url.scheme == 'https': return True if parsed_url.scheme == 's3': endpoint_url", "err.response['Error']['Code'] == 'NoSuchKey': return False raise err # otherwise, just try to \"read\"", "indicated that they do not want SSL # verification. context = ssl._create_unverified_context() req", "the above names if the only difference between the two is how the", "SSL # verification. 
context = ssl._create_unverified_context() req = Request(url_util.format(url)) content_type = None is_web_url", "original # metadata), and then delete the original. This operation # needs to", "and is_web_url: # Make a HEAD request first to check the content type.", "certificate \" \"verification problem. You can try running spack -k, \" \"which will", "standard spelling of \"Content-length\" would be substituted for any of the following names:", "from that of their corresponding item in headers, or if the keys of", "tuple. This new list is returned if at least one of its elements", "or list or tuple): URL or sequence of URLs for different versions of", "[archive_urls] # Generate a list of list_urls based on archive urls and any", "if changed else headers except TypeError: pass return headers class SpackWebError(spack.error.SpackError): \"\"\"Superclass for", "url list due to a certificate \" \"verification problem. You can try running", "links to follow from the root. Prints out a warning only if the", "not old_key new_dict[key] = value return new_dict if changed else headers except (AttributeError,", "not endpoint_url: return True if url_util.parse(endpoint_url, scheme='https').scheme == 'https': return True return False", "that cannot be mapped as described above are returned unaltered. For example: The", "standardized spellings. Standardizes the spellings of the following header names: - Accept-ranges -", "+ 1, max_depth, raise_on_error)) visited.add(abs_link) if subcalls: pool = NonDaemonPool(processes=len(subcalls)) try: results =", "URL. The capture group is converted # to a generic wildcard, so we", "the standardized spelling for headers[0]. If headers is a sequence, then a new", "Arguments: archive_urls (str or list or tuple): URL or sequence of URLs for", "can have sub-pools.\"\"\" @property def daemon(self): return False @daemon.setter def daemon(self, value): pass", "six from six.moves.urllib.request import urlopen, Request from six.moves.urllib.error import URLError import multiprocessing.pool try:", "(Apache-2.0 OR MIT) from __future__ import print_function import codecs import errno import re", "continue # Skip things outside the root directory if not abs_link.startswith(root): continue #", "item, standardize_header_names(item) changed = changed or item is not old_item new_list.append(item) return new_list", "results: pages.update(sub_pages) links.update(sub_links) finally: pool.terminate() pool.join() except URLError as e: tty.debug(e) if hasattr(e,", "e), traceback.format_exc()) return pages, links def _spider_wrapper(args): \"\"\"Wrapper for using spider with multiprocessing.\"\"\"", "spack.url.UndetectableVersionError: continue return versions def standardize_header_names(headers): \"\"\"Replace certain header names with standardized spellings.", "will spawn processes to fetch the children, for much improved performance over a", "debug mode. tty.debug(\"Error in _spider: %s:%s\" % (type(e), e), traceback.format_exc()) return pages, links", "set of links encountered while visiting the pages. \"\"\" pages = {} #", "value in headers.items(): if isinstance(key, (tuple, six.string_types)): old_key, key = key, standardize_header_names(key) changed", "six.moves.urllib.request import urlopen, Request from six.moves.urllib.error import URLError import multiprocessing.pool try: # Python", "follow links. 
if depth < max_depth: subcalls.append((abs_link, visited, root, depth + 1, max_depth,", "\" if content_type is not None else \"\", content_type or \"\")) return None,", "# Scrape them for archive URLs regexes = [] for aurl in archive_urls:", "each item is the key of its corresponding item in headers, mapped as", "at least one of its elements differ from their corrsponding element in headers.", "handle_starttag(self, tag, attrs): if tag == 'a': for attr, val in attrs: if", "verification. context = ssl._create_unverified_context() req = Request(url_util.format(url)) content_type = None is_web_url = url.scheme", "warn_no_ssl_cert_checking(): tty.warn(\"Spack will not check SSL certificates. You need to update \" \"your", "example: The standard spelling of \"Content-length\" would be substituted for any of the", "import URLError import multiprocessing.pool try: # Python 2 had these in the HTMLParser", "parsing HTML.\" # Pre-2.7.3 Pythons in particular have rather prickly HTML parsing. if", "abs_link = url_util.join( response_url, raw_link.strip(), resolve_href=True) links.add(abs_link) # Skip stuff that looks like", "isinstance(e.reason, ssl.SSLError): tty.warn(\"Spack was unable to fetch url list due to a certificate", "tuple): URL or sequence of URLs for different versions of a package. Typically", "requests _timeout = 10 # See docstring for standardize_header_names() _separators = ('', '", "not key: break def list_url(url): url = url_util.parse(url) local_path = url_util.local_file_path(url) if local_path:", "os.listdir(local_path) if url.scheme == 's3': s3 = s3_util.create_s3_session(url) return list(set( key.split('/', 1)[0] for", "depth, max_depth, raise_on_error): \"\"\"Fetches URL and any pages it links to up to", "\"\"\"Wrapper for using spider with multiprocessing.\"\"\" return _spider(*args) def _urlopen(req, *args, **kwargs): \"\"\"Wrapper", "headers.items(): if isinstance(key, (tuple, six.string_types)): old_key, key = key, standardize_header_names(key) changed = changed", "else: # User wants SSL verification, and it *can* be provided. context =", "for a listing of archives. Spack will scrape these pages for download links", "urls and any # explicitly listed list_url in the package list_urls = set()", "to up to max_depth. depth should initially be zero, and max_depth is the", "in particular have rather prickly HTML parsing. if sys.version_info[:3] < (2, 7, 3):", "NonDaemonPool(multiprocessing.pool.Pool): \"\"\"Pool that uses non-daemon processes\"\"\" Process = NonDaemonProcess else: class NonDaemonContext(type(multiprocessing.get_context())): Process", "ClientError try: s3.get_object(Bucket=url.netloc, Key=url.path) return True except ClientError as err: if err.response['Error']['Code'] ==", "os.remove(local_file_path) else: raise NotImplementedError( 'Unrecognized URL scheme: {SCHEME}'.format( SCHEME=remote_url.scheme)) def url_exists(url): url =", "the tarballs from the package file itself. By default, this searches the parent", "links first. # Any conflicting versions will be overwritten by the list_url links.", "isinstance(item, (tuple, six.string_types)): old_item, item = item, standardize_header_names(item) changed = changed or item", "first to check the content type. This lets # us ignore tarballs and", "be zero, and max_depth is the max depth of links to follow from", "through archive_url links first. 
# Any conflicting versions will be overwritten by the", "headers[0] is a string, and there exists a standardized spelling for header[0] that", "attr == 'href': self.links.append(val) class NonDaemonProcess(multiprocessing.Process): \"\"\"Process that allows sub-processes, so pools can", "resolve_href=True) links.add(abs_link) # Skip stuff that looks like an archive if any(raw_link.endswith(suf) for", "if uses_ssl(url): if verify_ssl: if __UNABLE_TO_VERIFY_SSL: # User wants SSL verification, but it", "url_regex = '/' + url_regex # We need to add a $ anchor", "links.update(lnk) # Scrape them for archive URLs regexes = [] for aurl in", "archive_urls (str or list or tuple): URL or sequence of URLs for different", "six.string_types): new = standardize_header_names(old) if old is not new: return (new,) + headers[1:]", "if any(re.search(r, url) for r in regexes): try: ver = spack.url.parse_version(url) versions[ver] =", "url in archive_urls + sorted(links): if any(re.search(r, url) for r in regexes): try:", "COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) from __future__ import", "url, num_entries=1024): key = None while True: contents, key = _list_s3_objects( client, url,", "version -> URL from any links that match the wildcards. # Walk through", "follow links on list_url pages. Defaults to 0. \"\"\" if not isinstance(archive_urls, (list,", "in archive_urls: # This creates a regex from the URL with a capture", "but mapped as above if a string or tuple. This new list is", "from it, then a new tuple is returned. This tuple has the same", "= link_parser.links.pop() abs_link = url_util.join( response_url, raw_link.strip(), resolve_href=True) links.add(abs_link) # Skip stuff that", "for a tarball with Accept: text/html. req.get_method = lambda: \"HEAD\" resp = _urlopen(req,", "_iter_s3_prefix(s3, url))) def _spider(url, visited, root, depth, max_depth, raise_on_error): \"\"\"Fetches URL and any", "*args, **kwargs) def spider(root, depth=0): \"\"\"Gets web pages from a root URL. If", "max_depth, raise_on_error): \"\"\"Fetches URL and any pages it links to up to max_depth.", "aurl in archive_urls: # This creates a regex from the URL with a", "pyver < (3, 4, 3)) ))(sys.version_info) def read_from_url(url, accept_content_type=None): url = url_util.parse(url) context", "as e: # Other types of errors are completely ignored, except in debug", "By default, this searches the parent directories of archives. Keyword Arguments: list_url (str", "their full text. - links: set of links encountered while visiting the pages.", "archive_urls = [archive_urls] # Generate a list of list_urls based on archive urls", "still need to end in '/download'. url_regex += r'(\\/download)?$' regexes.append(url_regex) # Build a", "scheme='https').scheme == 'https': return True return False __UNABLE_TO_VERIFY_SSL = ( lambda pyver: (", "return False @daemon.setter def daemon(self, value): pass if sys.version_info[0] < 3: class NonDaemonPool(multiprocessing.pool.Pool):", "URLError: return False def remove_url(url): url = url_util.parse(url) local_path = url_util.local_file_path(url) if local_path:", "= url_util.parse(root) pages, links = _spider(root, set(), root, 0, depth, False) return pages,", "translated to one of the above names if the only difference between the", "\"which will not check SSL certificates. 
Use this at your \" \"own risk.\")", "start_after is not None: list_args['StartAfter'] = start_after result = client.list_objects_v2(**list_args) last_key = None", "takes an HTML page and strips out the hrefs on the links. Good", "_separators, 'Rr')), \"Content-length\": set( ''.join((C, 'ontent', sep, L, 'ength')) for C, sep, L", "additional_list_urls = set() for lurl in list_urls: if not lurl.endswith('/'): additional_list_urls.add(lurl + '/')", "(e.g., depth=2), then this will also follow up to <depth> levels of links", "a tarball with Accept: text/html. req.get_method = lambda: \"HEAD\" resp = _urlopen(req, timeout=_timeout,", "to avoid # one round-trip. However, most servers seem to ignore the header", "page URL -> text content. links = set() # set of all links", "= result['Contents'][-1]['Key'] iter = (key for key in ( os.path.relpath(entry['Key'], url.path) for entry", "opener = spack.s3_handler.open return opener(req, *args, **kwargs) def spider(root, depth=0): \"\"\"Gets web pages", "*any* # non-throwing response contains the resource represented by the URL try: read_from_url(url)", "new dict is returned if at least one of its items has a", "have sub-pools.\"\"\" @property def daemon(self): return False @daemon.setter def daemon(self, value): pass if", "tty import spack.cmd import spack.config import spack.error import spack.url import spack.util.crypto import spack.util.s3", "above names if the only difference between the two is how the first", "standardize_header_names(key) changed = changed or key is not old_key new_dict[key] = value return", "in product('Cc', _separators, 'Ll')), \"Content-type\": set( ''.join((C, 'ontent', sep, T, 'ype')) for C,", "the regex to prevent # Spack from picking up similarly named packages like:", "already-visited links if abs_link in visited: continue # If we're not at max", "(pyver < (2, 7, 9)) or ((3,) < pyver < (3, 4, 3))", "the file (plus original # metadata), and then delete the original. This operation", "_separators, 'Tt')), \"Date\": set(('Date', 'date')), \"Last-modified\": set( ''.join((L, 'ast', sep, M, 'odified')) for", "SSL certs. if uses_ssl(url): if verify_ssl: if __UNABLE_TO_VERIFY_SSL: # User wants SSL verification,", "= url_util.local_file_path(url) if local_path: return os.listdir(local_path) if url.scheme == 's3': s3 = s3_util.create_s3_session(url)", "is taken from the corresponding item. If the keys of multiple items in", "\"\"\"Superclass for Spack web spidering errors.\"\"\" class NoNetworkConnectionError(SpackWebError): \"\"\"Raised when an operation can't", "of its elements differ from their corrsponding element in headers. If headers is", "headers try: changed = False new_dict = {} for key, value in headers.items():", "while remote_path.startswith('/'): remote_path = remote_path[1:] s3 = s3_util.create_s3_session(remote_url) s3.upload_file(local_file_path, remote_url.netloc, remote_path, ExtraArgs=extra_args) if", "pages: dict of pages visited (URL) mapped to their full text. 
- links:", "url_util.local_file_path(url) if local_path: return os.listdir(local_path) if url.scheme == 's3': s3 = s3_util.create_s3_session(url) return", "This creates a regex from the URL with a capture group for #", "_list_s3_objects(client, url, num_entries, start_after=None): list_args = dict( Bucket=url.netloc, Prefix=url.path, MaxKeys=num_entries) if start_after is", "(str or list or tuple): URL or sequence of URLs for different versions", "yield x if not key: break def list_url(url): url = url_util.parse(url) local_path =", "multiple items in headers map to the same key after being standardized. In", "sub_pages, sub_links in results: pages.update(sub_pages) links.update(sub_links) finally: pool.terminate() pool.join() except URLError as e:", "not the full path. url_regex = os.path.basename(url_regex) # We need to add a", "(str or None): URL for a listing of archives. Spack will scrape these", "# Pre-2.7.3 Pythons in particular have rather prickly HTML parsing. if sys.version_info[:3] <", "versions[ver] = url except spack.url.UndetectableVersionError: continue return versions def standardize_header_names(headers): \"\"\"Replace certain header", "= \"Got an error parsing HTML.\" # Pre-2.7.3 Pythons in particular have rather", "\"\"\"Scrape web pages for new versions of a tarball. Arguments: archive_urls (str or", "url_util.local_file_path(url) if local_path: return os.path.exists(local_path) if url.scheme == 's3': s3 = s3_util.create_s3_session(url) from", "pages.update(pg) links.update(lnk) # Scrape them for archive URLs regexes = [] for aurl", "URL -> text content. links = set() # set of all links seen", "for header[0] that differs from it, then a new tuple is returned. This", "max depth of links to follow from the root. Prints out a warning", "has the same elements as headers, except the first element is the standardized", "from any links that match the wildcards. # Walk through archive_url links first.", "= 10 # See docstring for standardize_header_names() _separators = ('', ' ', '_',", "Use Python 2.7.3 or newer for better HTML parsing.\" tty.warn(msg, url, \"HTMLParseError: \"", "strips out the hrefs on the links. Good enough for a really simple", "the version part of the URL. The capture group is converted # to", "pass return headers class SpackWebError(spack.error.SpackError): \"\"\"Superclass for Spack web spidering errors.\"\"\" class NoNetworkConnectionError(SpackWebError):", "llnl.util.tty as tty import spack.cmd import spack.config import spack.error import spack.url import spack.util.crypto", "attrs: if attr == 'href': self.links.append(val) class NonDaemonProcess(multiprocessing.Process): \"\"\"Process that allows sub-processes, so", "if local_path: return os.path.exists(local_path) if url.scheme == 's3': s3 = s3_util.create_s3_session(url) from botocore.exceptions", "a package. Typically these are just the tarballs from the package file itself.", "< pyver < (3, 4, 3)) ))(sys.version_info) def read_from_url(url, accept_content_type=None): url = url_util.parse(url)", "# Skip already-visited links if abs_link in visited: continue # If we're not", "if separated, whether they are so by a dash (-), underscore (_), or", "url.scheme in ('http', 'https') if accept_content_type and is_web_url: # Make a HEAD request", "pool.join() except URLError as e: tty.debug(e) if hasattr(e, 'reason') and isinstance(e.reason, ssl.SSLError): tty.warn(\"Spack", "find_versions_of_archive(archive_urls, list_url=None, list_depth=0): \"\"\"Scrape web pages for new versions of a tarball. 
Arguments:", "Keyword Arguments: list_url (str or None): URL for a listing of archives. Spack", "Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0", "= url_util.local_file_path(url) if local_path: os.remove(local_path) return if url.scheme == 's3': s3 = s3_util.create_s3_session(url)", "if a string or tuple. The value is taken from the corresponding item.", "try running spack -k, \" \"which will not check SSL certificates. Use this", "resp = _urlopen(req, timeout=_timeout, context=context) content_type = resp.headers.get('Content-type') # Do the real GET", "sequential fetch. \"\"\" root = url_util.parse(root) pages, links = _spider(root, set(), root, 0,", "import HTMLParser # Also, HTMLParseError is deprecated and never raised. class HTMLParseError(Exception): pass", "= None while True: contents, key = _list_s3_objects( client, url, num_entries, start_after=key) for", "def handle_starttag(self, tag, attrs): if tag == 'a': for attr, val in attrs:", "failed because it crosses # filesystem boundaries. Copy the file (plus original #", "else: class NonDaemonContext(type(multiprocessing.get_context())): Process = NonDaemonProcess class NonDaemonPool(multiprocessing.pool.Pool): \"\"\"Pool that uses non-daemon processes\"\"\"", "the root. Prints out a warning only if the root can't be fetched;", "# filesystem boundaries. Copy the file (plus original # metadata), and then delete", "if isinstance(item, (tuple, six.string_types)): old_item, item = item, standardize_header_names(item) changed = changed or", "scheme: {SCHEME}'.format( SCHEME=remote_url.scheme)) def url_exists(url): url = url_util.parse(url) local_path = url_util.local_file_path(url) if local_path:", "except URLError: return False def remove_url(url): url = url_util.parse(url) local_path = url_util.local_file_path(url) if", "ssl.create_default_context() else: # User has explicitly indicated that they do not want SSL", "type \" if content_type is not None else \"\", content_type or \"\")) return", "# .sha256 # .sig # However, SourceForge downloads still need to end in", "\"\"\" if isinstance(headers, six.string_types): for standardized_spelling, other_spellings in ( HTTP_HEADER_NAME_ALIASES.items()): if headers in", "lets # us ignore tarballs and gigantic files. # It would be nice", "string or tuple. This new list is returned if at least one of", "continue # If we're not at max depth, follow links. if depth <", "list or tuple): URL or sequence of URLs for different versions of a", "== 'a': for attr, val in attrs: if attr == 'href': self.links.append(val) class", "considered, where each element is its corresponding element in headers, but mapped as", "the regex to prevent # Spack from picking up signature files like: #", "val in attrs: if attr == 'href': self.links.append(val) class NonDaemonProcess(multiprocessing.Process): \"\"\"Process that allows", "Typically these are just the tarballs from the package file itself. 
By default,", "difference between the two is how the first letters of each word are", "if not key: break def list_url(url): url = url_util.parse(url) local_path = url_util.local_file_path(url) if", "= Request(url_util.format(url)) content_type = None is_web_url = url.scheme in ('http', 'https') if accept_content_type", "except (AttributeError, TypeError, ValueError): pass try: changed = False new_list = [] for", "URL, and assume that *any* # non-throwing response contains the resource represented by", "raise_on_error: raise NoNetworkConnectionError(str(e), url) except HTMLParseError as e: # This error indicates that", "pages. Defaults to 0. \"\"\" if not isinstance(archive_urls, (list, tuple)): archive_urls = [archive_urls]", "while True: contents, key = _list_s3_objects( client, url, num_entries, start_after=key) for x in", "depth < max_depth: subcalls.append((abs_link, visited, root, depth + 1, max_depth, raise_on_error)) visited.add(abs_link) if", "elements as headers, except the first element is the standardized spelling for headers[0].", "like: # .asc # .md5 # .sha256 # .sig # However, SourceForge downloads", "headers except TypeError: pass return headers class SpackWebError(spack.error.SpackError): \"\"\"Superclass for Spack web spidering", "R, 'anges')) for A, sep, R in product('Aa', _separators, 'Rr')), \"Content-length\": set( ''.join((C,", "'ccept', sep, R, 'anges')) for A, sep, R in product('Aa', _separators, 'Rr')), \"Content-length\":", "T, 'ype')) for C, sep, T in product('Cc', _separators, 'Tt')), \"Date\": set(('Date', 'date')),", "be a bit more liberal and just look for the archive # part,", "spack -k, \" \"which will not check SSL certificates. Use this at your", "the corresponding item. If the keys of multiple items in headers map to", "operation # needs to be done in separate steps. shutil.copy2(local_file_path, remote_file_path) os.remove(local_file_path) elif", "like the archive URL. list_depth (int): Max depth to follow links on list_url", "each element is its corresponding element in headers, but mapped as above if", "after being standardized. In all other cases headers is returned unaltered. \"\"\" if", "sys.version_info[0] < 3: class NonDaemonPool(multiprocessing.pool.Pool): \"\"\"Pool that uses non-daemon processes\"\"\" Process = NonDaemonProcess", "daemon(self, value): pass if sys.version_info[0] < 3: class NonDaemonPool(multiprocessing.pool.Pool): \"\"\"Pool that uses non-daemon", "url_util.parse(url) local_path = url_util.local_file_path(url) if local_path: return os.listdir(local_path) if url.scheme == 's3': s3", "2013-2019 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See", "processes to fetch the children, for much improved performance over a sequential fetch.", "not None: list_urls.add(list_url) for aurl in archive_urls: list_urls |= spack.url.find_list_urls(aurl) # Add '/'" ]
[ "the outputs out of the Destillate (Top Stream) tops_ethane = self._flowsheet.STRM_Get_Outputs(self._tops_name, self._name_to_aspen_name.ethane) tops_propane", "self._flowsheet.STRM_Get_Outputs(self._tops_name, self._name_to_aspen_name.isobutane) tops_n_butane = self._flowsheet.STRM_Get_Outputs(self._tops_name, self._name_to_aspen_name.n_butane) tops_isopentane = self._flowsheet.STRM_Get_Outputs(self._tops_name, self._name_to_aspen_name.isopentane) tops_n_pentane = self._flowsheet.STRM_Get_Outputs(self._tops_name,", "self._flowsheet.BLK_Get_FeedLocation() # D_Pressure = self._flowsheet.BLK_Get_Pressure() # D_Reflux_Ratio = self._flowsheet.BLK_Get_RefluxRatio() # D_Reboiler_Ratio = self._flowsheet.BLK_Get_ReboilerRatio()", "= self._flowsheet.BLK_Get_FeedLocation() # D_Pressure = self._flowsheet.BLK_Get_Pressure() # D_Reflux_Ratio = self._flowsheet.BLK_Get_RefluxRatio() # D_Reboiler_Ratio =", "= self._flowsheet.STRM_Get_Pressure(self._bottoms_name) # Acquiring the outputs out of the Bottom (Bottom Stream) bots_ethane", "= ColumnOutputSpecification(condensor_duty=D_Cond_Duty, reboiler_duty=D_Reb_Duty, diameter=D_Col_Diameter) return D_Specifications def set_column_specification(self, column_specification: ColumnInputSpecification) -> None: self._flowsheet.BLK_NumberOfStages(column_specification.n_stages)", "value (per year) of a stream.\"\"\" raise NotImplementedError def stream_is_product(self, stream_specification: StreamSpecification, product_specification:", "specification.\"\"\" raise NotImplementedError if __name__ == '__main__': from hydrocarbon_problem.api.api_tests import test_api aspen_api =", "pressure=bots_pressure, molar_flows=PerCompoundProperty(ethane=bots_ethane, propane=bots_propane, isobutane=bots_isobutane, n_butane=bots_n_butane, isopentane=bots_isopentane, n_pentane=bots_n_pentane)) return tops_specifications, bots_specifications def get_simulated_column_properties(self) ->", "values of Top streams tops_temperature = self._flowsheet.STRM_Get_Temperature(self._tops_name) tops_pressure = self._flowsheet.STRM_Get_Pressure(self._tops_name) # Acquiring the", "isopentane=bots_isopentane, n_pentane=bots_n_pentane)) return tops_specifications, bots_specifications def get_simulated_column_properties(self) -> ColumnOutputSpecification: # D_F_Location = self._flowsheet.BLK_Get_FeedLocation()", "hydrocarbon_problem.api.types import StreamSpecification, ColumnInputSpecification, \\ ColumnOutputSpecification, ProductSpecification, PerCompoundProperty PATH = 'C:/Users/s2199718/Desktop/RL_PD/AspenSimulation/HydrocarbonMixture.bkp' class AspenAPI(BaseAspenDistillationAPI):", "= self._flowsheet.STRM_Get_Outputs(self._bottoms_name, self._name_to_aspen_name.n_butane) bots_isopentane = self._flowsheet.STRM_Get_Outputs(self._bottoms_name, self._name_to_aspen_name.isopentane) bots_n_pentane = self._flowsheet.STRM_Get_Outputs(self._bottoms_name, self._name_to_aspen_name.n_pentane) # Tubulating", "product specification.\"\"\" raise NotImplementedError if __name__ == '__main__': from hydrocarbon_problem.api.api_tests import test_api aspen_api", "def get_simulated_column_properties(self) -> ColumnOutputSpecification: # D_F_Location = self._flowsheet.BLK_Get_FeedLocation() # D_Pressure = self._flowsheet.BLK_Get_Pressure() #", "str = \"S1\" self._tops_name: str = \"S2\" self._bottoms_name: str = \"S3\" self._name_to_aspen_name =", "stream.\"\"\" raise NotImplementedError def stream_is_product(self, stream_specification: StreamSpecification, product_specification: ProductSpecification) -> bool: 
\"\"\"Checks whether", "\"S2\" self._bottoms_name: str = \"S3\" self._name_to_aspen_name = PerCompoundProperty(ethane=\"ETHANE\", propane=\"PROPANE\", isobutane=\"I-BUTANE\", n_butane=\"N-BUTANE\", isopentane=\"I-PENTAN\", n_pentane=\"N-PENTAN\")", "self._flowsheet.STRM_Get_Outputs(self._tops_name, self._name_to_aspen_name.n_butane) tops_isopentane = self._flowsheet.STRM_Get_Outputs(self._tops_name, self._name_to_aspen_name.isopentane) tops_n_pentane = self._flowsheet.STRM_Get_Outputs(self._tops_name, self._name_to_aspen_name.n_pentane) # Passing all", "AspenAPI(BaseAspenDistillationAPI): def __init__(self): self._flowsheet: Simulation = Simulation(PATH=PATH, VISIBILITY=False) self._feed_name: str = \"S1\" self._tops_name:", "from hydrocarbon_problem.api.api_base import BaseAspenDistillationAPI from hydrocarbon_problem.api.types import StreamSpecification, ColumnInputSpecification, \\ ColumnOutputSpecification, ProductSpecification, PerCompoundProperty", "-> ColumnOutputSpecification: # D_F_Location = self._flowsheet.BLK_Get_FeedLocation() # D_Pressure = self._flowsheet.BLK_Get_Pressure() # D_Reflux_Ratio =", "return tops_specifications, bots_specifications def get_simulated_column_properties(self) -> ColumnOutputSpecification: # D_F_Location = self._flowsheet.BLK_Get_FeedLocation() # D_Pressure", "D_F_Location = self._flowsheet.BLK_Get_FeedLocation() # D_Pressure = self._flowsheet.BLK_Get_Pressure() # D_Reflux_Ratio = self._flowsheet.BLK_Get_RefluxRatio() # D_Reboiler_Ratio", "# D_Pressure = self._flowsheet.BLK_Get_Pressure() # D_Reflux_Ratio = self._flowsheet.BLK_Get_RefluxRatio() # D_Reboiler_Ratio = self._flowsheet.BLK_Get_ReboilerRatio() D_Cond_Duty", "self._flowsheet.STRM_Get_Outputs(self._bottoms_name, self._name_to_aspen_name.ethane) bots_propane = self._flowsheet.STRM_Get_Outputs(self._bottoms_name, self._name_to_aspen_name.propane) bots_isobutane = self._flowsheet.STRM_Get_Outputs(self._bottoms_name, self._name_to_aspen_name.isobutane) bots_n_butane = self._flowsheet.STRM_Get_Outputs(self._bottoms_name,", "the Bottom Stream bots_specifications = StreamSpecification(temperature=bots_temperature, pressure=bots_pressure, molar_flows=PerCompoundProperty(ethane=bots_ethane, propane=bots_propane, isobutane=bots_isobutane, n_butane=bots_n_butane, isopentane=bots_isopentane, n_pentane=bots_n_pentane))", "the variables to their respective \"slot\" tops_specifications = StreamSpecification(temperature=tops_temperature, pressure=tops_pressure, molar_flows=PerCompoundProperty(ethane=tops_ethane, propane=tops_propane, isobutane=tops_isobutane,", "# D_Reboiler_Ratio = self._flowsheet.BLK_Get_ReboilerRatio() D_Cond_Duty = self._flowsheet.BLK_Get_Condenser_Duty() D_Reb_Duty = self._flowsheet.BLK_Get_Reboiler_Duty() D_Col_Diameter = self._flowsheet.BLK_Get_Column_Diameter()", "physical values of Top streams tops_temperature = self._flowsheet.STRM_Get_Temperature(self._tops_name) tops_pressure = self._flowsheet.STRM_Get_Pressure(self._tops_name) # Acquiring", "stream_is_product(self, stream_specification: StreamSpecification, product_specification: ProductSpecification) -> bool: \"\"\"Checks whether a stream meets the", "# D_Reflux_Ratio = self._flowsheet.BLK_Get_RefluxRatio() # D_Reboiler_Ratio = self._flowsheet.BLK_Get_ReboilerRatio() D_Cond_Duty = self._flowsheet.BLK_Get_Condenser_Duty() D_Reb_Duty =", "Bottom (Bottom Stream) bots_ethane = self._flowsheet.STRM_Get_Outputs(self._bottoms_name, self._name_to_aspen_name.ethane) bots_propane = self._flowsheet.STRM_Get_Outputs(self._bottoms_name, 
self._name_to_aspen_name.propane) bots_isobutane =", "def solve_flowsheet(self) -> bool: self._flowsheet.Run() def get_column_cost(self, column_specification: ColumnOutputSpecification) -> float: \"\"\"Calculates the", "self._flowsheet.STRM_Get_Outputs(self._tops_name, self._name_to_aspen_name.n_pentane) # Passing all the variables to their respective \"slot\" tops_specifications =", "outputs out of the Bottom (Bottom Stream) bots_ethane = self._flowsheet.STRM_Get_Outputs(self._bottoms_name, self._name_to_aspen_name.ethane) bots_propane =", "streams bots_temperature = self._flowsheet.STRM_Get_Temperature(self._bottoms_name) bots_pressure = self._flowsheet.STRM_Get_Pressure(self._bottoms_name) # Acquiring the outputs out of", "the Stream Composition for the Feed self._flowsheet.STRM_Flowrate(self._feed_name, self._name_to_aspen_name.ethane, stream_specification.molar_flows.ethane) self._flowsheet.STRM_Flowrate(self._feed_name, self._name_to_aspen_name.propane, stream_specification.molar_flows.propane) self._flowsheet.STRM_Flowrate(self._feed_name,", "self._name_to_aspen_name.isobutane) tops_n_butane = self._flowsheet.STRM_Get_Outputs(self._tops_name, self._name_to_aspen_name.n_butane) tops_isopentane = self._flowsheet.STRM_Get_Outputs(self._tops_name, self._name_to_aspen_name.isopentane) tops_n_pentane = self._flowsheet.STRM_Get_Outputs(self._tops_name, self._name_to_aspen_name.n_pentane)", "= self._flowsheet.STRM_Get_Outputs(self._tops_name, self._name_to_aspen_name.n_pentane) # Passing all the variables to their respective \"slot\" tops_specifications", "= self._flowsheet.STRM_Get_Outputs(self._bottoms_name, self._name_to_aspen_name.isopentane) bots_n_pentane = self._flowsheet.STRM_Get_Outputs(self._bottoms_name, self._name_to_aspen_name.n_pentane) # Tubulating the Results of the", "-> bool: \"\"\"Checks whether a stream meets the product specification.\"\"\" raise NotImplementedError if", "input stream to a column to fit the stream specification\"\"\" # Defining the", "self._flowsheet: Simulation = Simulation(PATH=PATH, VISIBILITY=False) self._feed_name: str = \"S1\" self._tops_name: str = \"S2\"", "\"\"\"Calculates the TAC of the column.\"\"\" raise NotImplementedError def get_stream_value(self, stream_specification: StreamSpecification) ->", "# Passing all the variables to their respective \"slot\" tops_specifications = StreamSpecification(temperature=tops_temperature, pressure=tops_pressure,", "stream to a column to fit the stream specification\"\"\" # Defining the Thermodynamic", "of Top streams bots_temperature = self._flowsheet.STRM_Get_Temperature(self._bottoms_name) bots_pressure = self._flowsheet.STRM_Get_Pressure(self._bottoms_name) # Acquiring the outputs", "# Tubulating the Results of the Bottom Stream bots_specifications = StreamSpecification(temperature=bots_temperature, pressure=bots_pressure, molar_flows=PerCompoundProperty(ethane=bots_ethane,", "isobutane=\"I-BUTANE\", n_butane=\"N-BUTANE\", isopentane=\"I-PENTAN\", n_pentane=\"N-PENTAN\") def set_input_stream_specification(self, stream_specification: StreamSpecification) -> None: \"\"\"Sets the input", "D_Cond_Duty = self._flowsheet.BLK_Get_Condenser_Duty() D_Reb_Duty = self._flowsheet.BLK_Get_Reboiler_Duty() D_Col_Diameter = self._flowsheet.BLK_Get_Column_Diameter() D_Specifications = ColumnOutputSpecification(condensor_duty=D_Cond_Duty, reboiler_duty=D_Reb_Duty,", "self._tops_name: str = \"S2\" self._bottoms_name: str = \"S3\" self._name_to_aspen_name = PerCompoundProperty(ethane=\"ETHANE\", propane=\"PROPANE\", 
isobutane=\"I-BUTANE\",", "self._name_to_aspen_name.propane) bots_isobutane = self._flowsheet.STRM_Get_Outputs(self._bottoms_name, self._name_to_aspen_name.isobutane) bots_n_butane = self._flowsheet.STRM_Get_Outputs(self._bottoms_name, self._name_to_aspen_name.n_butane) bots_isopentane = self._flowsheet.STRM_Get_Outputs(self._bottoms_name, self._name_to_aspen_name.isopentane)", "Tuple from Simulation import Simulation from hydrocarbon_problem.api.api_base import BaseAspenDistillationAPI from hydrocarbon_problem.api.types import StreamSpecification,", "the Bottom (Bottom Stream) bots_ethane = self._flowsheet.STRM_Get_Outputs(self._bottoms_name, self._name_to_aspen_name.ethane) bots_propane = self._flowsheet.STRM_Get_Outputs(self._bottoms_name, self._name_to_aspen_name.propane) bots_isobutane", "= self._flowsheet.BLK_Get_Column_Diameter() D_Specifications = ColumnOutputSpecification(condensor_duty=D_Cond_Duty, reboiler_duty=D_Reb_Duty, diameter=D_Col_Diameter) return D_Specifications def set_column_specification(self, column_specification: ColumnInputSpecification)", "= self._flowsheet.STRM_Get_Temperature(self._bottoms_name) bots_pressure = self._flowsheet.STRM_Get_Pressure(self._bottoms_name) # Acquiring the outputs out of the Bottom", "isobutane=bots_isobutane, n_butane=bots_n_butane, isopentane=bots_isopentane, n_pentane=bots_n_pentane)) return tops_specifications, bots_specifications def get_simulated_column_properties(self) -> ColumnOutputSpecification: # D_F_Location", "respective \"slot\" tops_specifications = StreamSpecification(temperature=tops_temperature, pressure=tops_pressure, molar_flows=PerCompoundProperty(ethane=tops_ethane, propane=tops_propane, isobutane=tops_isobutane, n_butane=tops_n_butane, isopentane=tops_isopentane, n_pentane=tops_n_pentane)) #", "# Acquiring the outputs out of the Bottom (Bottom Stream) bots_ethane = self._flowsheet.STRM_Get_Outputs(self._bottoms_name,", "= self._flowsheet.BLK_Get_ReboilerRatio() D_Cond_Duty = self._flowsheet.BLK_Get_Condenser_Duty() D_Reb_Duty = self._flowsheet.BLK_Get_Reboiler_Duty() D_Col_Diameter = self._flowsheet.BLK_Get_Column_Diameter() D_Specifications =", "all the variables to their respective \"slot\" tops_specifications = StreamSpecification(temperature=tops_temperature, pressure=tops_pressure, molar_flows=PerCompoundProperty(ethane=tops_ethane, propane=tops_propane,", "Stream) tops_ethane = self._flowsheet.STRM_Get_Outputs(self._tops_name, self._name_to_aspen_name.ethane) tops_propane = self._flowsheet.STRM_Get_Outputs(self._tops_name, self._name_to_aspen_name.propane) tops_isobutane = self._flowsheet.STRM_Get_Outputs(self._tops_name, self._name_to_aspen_name.isobutane)", "self._name_to_aspen_name.ethane) tops_propane = self._flowsheet.STRM_Get_Outputs(self._tops_name, self._name_to_aspen_name.propane) tops_isobutane = self._flowsheet.STRM_Get_Outputs(self._tops_name, self._name_to_aspen_name.isobutane) tops_n_butane = self._flowsheet.STRM_Get_Outputs(self._tops_name, self._name_to_aspen_name.n_butane)", "the physical values of Top streams tops_temperature = self._flowsheet.STRM_Get_Temperature(self._tops_name) tops_pressure = self._flowsheet.STRM_Get_Pressure(self._tops_name) #", "Stream bots_specifications = StreamSpecification(temperature=bots_temperature, pressure=bots_pressure, molar_flows=PerCompoundProperty(ethane=bots_ethane, propane=bots_propane, isobutane=bots_isobutane, n_butane=bots_n_butane, isopentane=bots_isopentane, n_pentane=bots_n_pentane)) return tops_specifications,", "Defining the Stream Composition 
for the Feed self._flowsheet.STRM_Flowrate(self._feed_name, self._name_to_aspen_name.ethane, stream_specification.molar_flows.ethane) self._flowsheet.STRM_Flowrate(self._feed_name, self._name_to_aspen_name.propane, stream_specification.molar_flows.propane)", "self._name_to_aspen_name.isobutane, stream_specification.molar_flows.isobutane) self._flowsheet.STRM_Flowrate(self._feed_name, self._name_to_aspen_name.n_butane, stream_specification.molar_flows.n_butane) self._flowsheet.STRM_Flowrate(self._feed_name, self._name_to_aspen_name.isopentane, stream_specification.molar_flows.isopentane) self._flowsheet.STRM_Flowrate(self._feed_name, self._name_to_aspen_name.n_pentane, stream_specification.molar_flows.n_pentane) def get_output_stream_specifications(self)", "StreamSpecification(temperature=bots_temperature, pressure=bots_pressure, molar_flows=PerCompoundProperty(ethane=bots_ethane, propane=bots_propane, isobutane=bots_isobutane, n_butane=bots_n_butane, isopentane=bots_isopentane, n_pentane=bots_n_pentane)) return tops_specifications, bots_specifications def get_simulated_column_properties(self)", "D_Reboiler_Ratio = self._flowsheet.BLK_Get_ReboilerRatio() D_Cond_Duty = self._flowsheet.BLK_Get_Condenser_Duty() D_Reb_Duty = self._flowsheet.BLK_Get_Reboiler_Duty() D_Col_Diameter = self._flowsheet.BLK_Get_Column_Diameter() D_Specifications", "-> None: self._flowsheet.BLK_NumberOfStages(column_specification.n_stages) self._flowsheet.BLK_FeedLocation(column_specification.feed_stage_location) self._flowsheet.BLK_Pressure(column_specification.condensor_pressure) self._flowsheet.BLK_RefluxRatio(column_specification.reflux_ratio) self._flowsheet.BLK_ReboilerRatio(column_specification.reboil_ratio) def solve_flowsheet(self) -> bool: self._flowsheet.Run() def", "self._flowsheet.STRM_Get_Temperature(self._tops_name) tops_pressure = self._flowsheet.STRM_Get_Pressure(self._tops_name) # Acquiring the outputs out of the Destillate (Top", "Tuple[StreamSpecification, StreamSpecification]: # Getting the physical values of Top streams tops_temperature = self._flowsheet.STRM_Get_Temperature(self._tops_name)", "bots_ethane = self._flowsheet.STRM_Get_Outputs(self._bottoms_name, self._name_to_aspen_name.ethane) bots_propane = self._flowsheet.STRM_Get_Outputs(self._bottoms_name, self._name_to_aspen_name.propane) bots_isobutane = self._flowsheet.STRM_Get_Outputs(self._bottoms_name, self._name_to_aspen_name.isobutane) bots_n_butane", "VISIBILITY=False) self._feed_name: str = \"S1\" self._tops_name: str = \"S2\" self._bottoms_name: str = \"S3\"", "Results of the Bottom Stream bots_specifications = StreamSpecification(temperature=bots_temperature, pressure=bots_pressure, molar_flows=PerCompoundProperty(ethane=bots_ethane, propane=bots_propane, isobutane=bots_isobutane, n_butane=bots_n_butane,", "def stream_is_product(self, stream_specification: StreamSpecification, product_specification: ProductSpecification) -> bool: \"\"\"Checks whether a stream meets", "pressure=tops_pressure, molar_flows=PerCompoundProperty(ethane=tops_ethane, propane=tops_propane, isobutane=tops_isobutane, n_butane=tops_n_butane, isopentane=tops_isopentane, n_pentane=tops_n_pentane)) # Getting the physical values of", "Composition for the Feed self._flowsheet.STRM_Flowrate(self._feed_name, self._name_to_aspen_name.ethane, stream_specification.molar_flows.ethane) self._flowsheet.STRM_Flowrate(self._feed_name, self._name_to_aspen_name.propane, stream_specification.molar_flows.propane) self._flowsheet.STRM_Flowrate(self._feed_name, 
self._name_to_aspen_name.isobutane, stream_specification.molar_flows.isobutane)", "(per year) of a stream.\"\"\" raise NotImplementedError def stream_is_product(self, stream_specification: StreamSpecification, product_specification: ProductSpecification)", "import StreamSpecification, ColumnInputSpecification, \\ ColumnOutputSpecification, ProductSpecification, PerCompoundProperty PATH = 'C:/Users/s2199718/Desktop/RL_PD/AspenSimulation/HydrocarbonMixture.bkp' class AspenAPI(BaseAspenDistillationAPI): def", "self._flowsheet.BLK_NumberOfStages(column_specification.n_stages) self._flowsheet.BLK_FeedLocation(column_specification.feed_stage_location) self._flowsheet.BLK_Pressure(column_specification.condensor_pressure) self._flowsheet.BLK_RefluxRatio(column_specification.reflux_ratio) self._flowsheet.BLK_ReboilerRatio(column_specification.reboil_ratio) def solve_flowsheet(self) -> bool: self._flowsheet.Run() def get_column_cost(self, column_specification:", "product_specification: ProductSpecification) -> bool: \"\"\"Checks whether a stream meets the product specification.\"\"\" raise", "= Simulation(PATH=PATH, VISIBILITY=False) self._feed_name: str = \"S1\" self._tops_name: str = \"S2\" self._bottoms_name: str", "out of the Bottom (Bottom Stream) bots_ethane = self._flowsheet.STRM_Get_Outputs(self._bottoms_name, self._name_to_aspen_name.ethane) bots_propane = self._flowsheet.STRM_Get_Outputs(self._bottoms_name,", "from hydrocarbon_problem.api.types import StreamSpecification, ColumnInputSpecification, \\ ColumnOutputSpecification, ProductSpecification, PerCompoundProperty PATH = 'C:/Users/s2199718/Desktop/RL_PD/AspenSimulation/HydrocarbonMixture.bkp' class", "Feed self._flowsheet.STRM_Flowrate(self._feed_name, self._name_to_aspen_name.ethane, stream_specification.molar_flows.ethane) self._flowsheet.STRM_Flowrate(self._feed_name, self._name_to_aspen_name.propane, stream_specification.molar_flows.propane) self._flowsheet.STRM_Flowrate(self._feed_name, self._name_to_aspen_name.isobutane, stream_specification.molar_flows.isobutane) self._flowsheet.STRM_Flowrate(self._feed_name, self._name_to_aspen_name.n_butane, stream_specification.molar_flows.n_butane)", "def set_column_specification(self, column_specification: ColumnInputSpecification) -> None: self._flowsheet.BLK_NumberOfStages(column_specification.n_stages) self._flowsheet.BLK_FeedLocation(column_specification.feed_stage_location) self._flowsheet.BLK_Pressure(column_specification.condensor_pressure) self._flowsheet.BLK_RefluxRatio(column_specification.reflux_ratio) self._flowsheet.BLK_ReboilerRatio(column_specification.reboil_ratio) def solve_flowsheet(self)", "self._name_to_aspen_name.ethane) bots_propane = self._flowsheet.STRM_Get_Outputs(self._bottoms_name, self._name_to_aspen_name.propane) bots_isobutane = self._flowsheet.STRM_Get_Outputs(self._bottoms_name, self._name_to_aspen_name.isobutane) bots_n_butane = self._flowsheet.STRM_Get_Outputs(self._bottoms_name, self._name_to_aspen_name.n_butane)", "raise NotImplementedError def get_stream_value(self, stream_specification: StreamSpecification) -> float: \"\"\"Calculates the value (per year)", "Destillate (Top Stream) tops_ethane = self._flowsheet.STRM_Get_Outputs(self._tops_name, self._name_to_aspen_name.ethane) tops_propane = self._flowsheet.STRM_Get_Outputs(self._tops_name, self._name_to_aspen_name.propane) tops_isobutane =", "ProductSpecification) -> bool: \"\"\"Checks whether a stream meets the product specification.\"\"\" raise NotImplementedError", "meets the product 
specification.\"\"\" raise NotImplementedError if __name__ == '__main__': from hydrocarbon_problem.api.api_tests import", "# D_F_Location = self._flowsheet.BLK_Get_FeedLocation() # D_Pressure = self._flowsheet.BLK_Get_Pressure() # D_Reflux_Ratio = self._flowsheet.BLK_Get_RefluxRatio() #", "\\ ColumnOutputSpecification, ProductSpecification, PerCompoundProperty PATH = 'C:/Users/s2199718/Desktop/RL_PD/AspenSimulation/HydrocarbonMixture.bkp' class AspenAPI(BaseAspenDistillationAPI): def __init__(self): self._flowsheet: Simulation", "n_pentane=\"N-PENTAN\") def set_input_stream_specification(self, stream_specification: StreamSpecification) -> None: \"\"\"Sets the input stream to a", "= StreamSpecification(temperature=tops_temperature, pressure=tops_pressure, molar_flows=PerCompoundProperty(ethane=tops_ethane, propane=tops_propane, isobutane=tops_isobutane, n_butane=tops_n_butane, isopentane=tops_isopentane, n_pentane=tops_n_pentane)) # Getting the physical", "StreamSpecification]: # Getting the physical values of Top streams tops_temperature = self._flowsheet.STRM_Get_Temperature(self._tops_name) tops_pressure", "stream_specification: StreamSpecification) -> None: \"\"\"Sets the input stream to a column to fit", "PerCompoundProperty(ethane=\"ETHANE\", propane=\"PROPANE\", isobutane=\"I-BUTANE\", n_butane=\"N-BUTANE\", isopentane=\"I-PENTAN\", n_pentane=\"N-PENTAN\") def set_input_stream_specification(self, stream_specification: StreamSpecification) -> None: \"\"\"Sets", "the input stream to a column to fit the stream specification\"\"\" # Defining", "ColumnInputSpecification) -> None: self._flowsheet.BLK_NumberOfStages(column_specification.n_stages) self._flowsheet.BLK_FeedLocation(column_specification.feed_stage_location) self._flowsheet.BLK_Pressure(column_specification.condensor_pressure) self._flowsheet.BLK_RefluxRatio(column_specification.reflux_ratio) self._flowsheet.BLK_ReboilerRatio(column_specification.reboil_ratio) def solve_flowsheet(self) -> bool: self._flowsheet.Run()", "bots_isobutane = self._flowsheet.STRM_Get_Outputs(self._bottoms_name, self._name_to_aspen_name.isobutane) bots_n_butane = self._flowsheet.STRM_Get_Outputs(self._bottoms_name, self._name_to_aspen_name.n_butane) bots_isopentane = self._flowsheet.STRM_Get_Outputs(self._bottoms_name, self._name_to_aspen_name.isopentane) bots_n_pentane", "self._name_to_aspen_name.isopentane) tops_n_pentane = self._flowsheet.STRM_Get_Outputs(self._tops_name, self._name_to_aspen_name.n_pentane) # Passing all the variables to their respective", "of the Bottom Stream bots_specifications = StreamSpecification(temperature=bots_temperature, pressure=bots_pressure, molar_flows=PerCompoundProperty(ethane=bots_ethane, propane=bots_propane, isobutane=bots_isobutane, n_butane=bots_n_butane, isopentane=bots_isopentane,", "= self._flowsheet.STRM_Get_Outputs(self._bottoms_name, self._name_to_aspen_name.ethane) bots_propane = self._flowsheet.STRM_Get_Outputs(self._bottoms_name, self._name_to_aspen_name.propane) bots_isobutane = self._flowsheet.STRM_Get_Outputs(self._bottoms_name, self._name_to_aspen_name.isobutane) bots_n_butane =", "def set_input_stream_specification(self, stream_specification: StreamSpecification) -> None: \"\"\"Sets the input stream to a column", "self._flowsheet.BLK_Get_ReboilerRatio() D_Cond_Duty = self._flowsheet.BLK_Get_Condenser_Duty() D_Reb_Duty = self._flowsheet.BLK_Get_Reboiler_Duty() D_Col_Diameter = self._flowsheet.BLK_Get_Column_Diameter() D_Specifications = ColumnOutputSpecification(condensor_duty=D_Cond_Duty,", 
"StreamSpecification, product_specification: ProductSpecification) -> bool: \"\"\"Checks whether a stream meets the product specification.\"\"\"", "values of Top streams bots_temperature = self._flowsheet.STRM_Get_Temperature(self._bottoms_name) bots_pressure = self._flowsheet.STRM_Get_Pressure(self._bottoms_name) # Acquiring the", "tops_propane = self._flowsheet.STRM_Get_Outputs(self._tops_name, self._name_to_aspen_name.propane) tops_isobutane = self._flowsheet.STRM_Get_Outputs(self._tops_name, self._name_to_aspen_name.isobutane) tops_n_butane = self._flowsheet.STRM_Get_Outputs(self._tops_name, self._name_to_aspen_name.n_butane) tops_isopentane", "tops_n_butane = self._flowsheet.STRM_Get_Outputs(self._tops_name, self._name_to_aspen_name.n_butane) tops_isopentane = self._flowsheet.STRM_Get_Outputs(self._tops_name, self._name_to_aspen_name.isopentane) tops_n_pentane = self._flowsheet.STRM_Get_Outputs(self._tops_name, self._name_to_aspen_name.n_pentane) #", "self._flowsheet.STRM_Flowrate(self._feed_name, self._name_to_aspen_name.n_pentane, stream_specification.molar_flows.n_pentane) def get_output_stream_specifications(self) -> Tuple[StreamSpecification, StreamSpecification]: # Getting the physical values", "propane=tops_propane, isobutane=tops_isobutane, n_butane=tops_n_butane, isopentane=tops_isopentane, n_pentane=tops_n_pentane)) # Getting the physical values of Top streams", "= self._flowsheet.STRM_Get_Outputs(self._tops_name, self._name_to_aspen_name.isobutane) tops_n_butane = self._flowsheet.STRM_Get_Outputs(self._tops_name, self._name_to_aspen_name.n_butane) tops_isopentane = self._flowsheet.STRM_Get_Outputs(self._tops_name, self._name_to_aspen_name.isopentane) tops_n_pentane =", "stream_specification.molar_flows.n_pentane) def get_output_stream_specifications(self) -> Tuple[StreamSpecification, StreamSpecification]: # Getting the physical values of Top", "raise NotImplementedError if __name__ == '__main__': from hydrocarbon_problem.api.api_tests import test_api aspen_api = AspenAPI()", "Defining the Thermodynamic Properties self._flowsheet.STRM_Temperature(self._feed_name, stream_specification.temperature) self._flowsheet.STRM_Pressure(self._feed_name, stream_specification.pressure) # Defining the Stream Composition", "tops_specifications = StreamSpecification(temperature=tops_temperature, pressure=tops_pressure, molar_flows=PerCompoundProperty(ethane=tops_ethane, propane=tops_propane, isobutane=tops_isobutane, n_butane=tops_n_butane, isopentane=tops_isopentane, n_pentane=tops_n_pentane)) # Getting the", "of the column.\"\"\" raise NotImplementedError def get_stream_value(self, stream_specification: StreamSpecification) -> float: \"\"\"Calculates the", "column.\"\"\" raise NotImplementedError def get_stream_value(self, stream_specification: StreamSpecification) -> float: \"\"\"Calculates the value (per", "out of the Destillate (Top Stream) tops_ethane = self._flowsheet.STRM_Get_Outputs(self._tops_name, self._name_to_aspen_name.ethane) tops_propane = self._flowsheet.STRM_Get_Outputs(self._tops_name,", "# Acquiring the outputs out of the Destillate (Top Stream) tops_ethane = self._flowsheet.STRM_Get_Outputs(self._tops_name,", "= self._flowsheet.BLK_Get_Condenser_Duty() D_Reb_Duty = self._flowsheet.BLK_Get_Reboiler_Duty() D_Col_Diameter = self._flowsheet.BLK_Get_Column_Diameter() D_Specifications = ColumnOutputSpecification(condensor_duty=D_Cond_Duty, reboiler_duty=D_Reb_Duty, diameter=D_Col_Diameter)", "self._flowsheet.BLK_RefluxRatio(column_specification.reflux_ratio) 
self._flowsheet.BLK_ReboilerRatio(column_specification.reboil_ratio) def solve_flowsheet(self) -> bool: self._flowsheet.Run() def get_column_cost(self, column_specification: ColumnOutputSpecification) -> float:", "n_pentane=tops_n_pentane)) # Getting the physical values of Top streams bots_temperature = self._flowsheet.STRM_Get_Temperature(self._bottoms_name) bots_pressure", "= self._flowsheet.STRM_Get_Outputs(self._tops_name, self._name_to_aspen_name.n_butane) tops_isopentane = self._flowsheet.STRM_Get_Outputs(self._tops_name, self._name_to_aspen_name.isopentane) tops_n_pentane = self._flowsheet.STRM_Get_Outputs(self._tops_name, self._name_to_aspen_name.n_pentane) # Passing", "bots_specifications def get_simulated_column_properties(self) -> ColumnOutputSpecification: # D_F_Location = self._flowsheet.BLK_Get_FeedLocation() # D_Pressure = self._flowsheet.BLK_Get_Pressure()", "bots_specifications = StreamSpecification(temperature=bots_temperature, pressure=bots_pressure, molar_flows=PerCompoundProperty(ethane=bots_ethane, propane=bots_propane, isobutane=bots_isobutane, n_butane=bots_n_butane, isopentane=bots_isopentane, n_pentane=bots_n_pentane)) return tops_specifications, bots_specifications", "tops_isopentane = self._flowsheet.STRM_Get_Outputs(self._tops_name, self._name_to_aspen_name.isopentane) tops_n_pentane = self._flowsheet.STRM_Get_Outputs(self._tops_name, self._name_to_aspen_name.n_pentane) # Passing all the variables", "get_simulated_column_properties(self) -> ColumnOutputSpecification: # D_F_Location = self._flowsheet.BLK_Get_FeedLocation() # D_Pressure = self._flowsheet.BLK_Get_Pressure() # D_Reflux_Ratio", "stream_specification.molar_flows.isobutane) self._flowsheet.STRM_Flowrate(self._feed_name, self._name_to_aspen_name.n_butane, stream_specification.molar_flows.n_butane) self._flowsheet.STRM_Flowrate(self._feed_name, self._name_to_aspen_name.isopentane, stream_specification.molar_flows.isopentane) self._flowsheet.STRM_Flowrate(self._feed_name, self._name_to_aspen_name.n_pentane, stream_specification.molar_flows.n_pentane) def get_output_stream_specifications(self) ->", "of Top streams tops_temperature = self._flowsheet.STRM_Get_Temperature(self._tops_name) tops_pressure = self._flowsheet.STRM_Get_Pressure(self._tops_name) # Acquiring the outputs", "the Thermodynamic Properties self._flowsheet.STRM_Temperature(self._feed_name, stream_specification.temperature) self._flowsheet.STRM_Pressure(self._feed_name, stream_specification.pressure) # Defining the Stream Composition for", "Simulation from hydrocarbon_problem.api.api_base import BaseAspenDistillationAPI from hydrocarbon_problem.api.types import StreamSpecification, ColumnInputSpecification, \\ ColumnOutputSpecification, ProductSpecification,", "-> None: \"\"\"Sets the input stream to a column to fit the stream", "def __init__(self): self._flowsheet: Simulation = Simulation(PATH=PATH, VISIBILITY=False) self._feed_name: str = \"S1\" self._tops_name: str", "= self._flowsheet.STRM_Get_Outputs(self._bottoms_name, self._name_to_aspen_name.isobutane) bots_n_butane = self._flowsheet.STRM_Get_Outputs(self._bottoms_name, self._name_to_aspen_name.n_butane) bots_isopentane = self._flowsheet.STRM_Get_Outputs(self._bottoms_name, self._name_to_aspen_name.isopentane) bots_n_pentane =", "self._flowsheet.STRM_Get_Pressure(self._tops_name) # Acquiring the outputs out of the Destillate (Top Stream) tops_ethane =", "molar_flows=PerCompoundProperty(ethane=tops_ethane, propane=tops_propane, isobutane=tops_isobutane, n_butane=tops_n_butane, 
isopentane=tops_isopentane, n_pentane=tops_n_pentane)) # Getting the physical values of Top", "from Simulation import Simulation from hydrocarbon_problem.api.api_base import BaseAspenDistillationAPI from hydrocarbon_problem.api.types import StreamSpecification, ColumnInputSpecification,", "self._name_to_aspen_name.n_butane, stream_specification.molar_flows.n_butane) self._flowsheet.STRM_Flowrate(self._feed_name, self._name_to_aspen_name.isopentane, stream_specification.molar_flows.isopentane) self._flowsheet.STRM_Flowrate(self._feed_name, self._name_to_aspen_name.n_pentane, stream_specification.molar_flows.n_pentane) def get_output_stream_specifications(self) -> Tuple[StreamSpecification, StreamSpecification]:", "= self._flowsheet.STRM_Get_Outputs(self._bottoms_name, self._name_to_aspen_name.propane) bots_isobutane = self._flowsheet.STRM_Get_Outputs(self._bottoms_name, self._name_to_aspen_name.isobutane) bots_n_butane = self._flowsheet.STRM_Get_Outputs(self._bottoms_name, self._name_to_aspen_name.n_butane) bots_isopentane =", "of a stream.\"\"\" raise NotImplementedError def stream_is_product(self, stream_specification: StreamSpecification, product_specification: ProductSpecification) -> bool:", "propane=\"PROPANE\", isobutane=\"I-BUTANE\", n_butane=\"N-BUTANE\", isopentane=\"I-PENTAN\", n_pentane=\"N-PENTAN\") def set_input_stream_specification(self, stream_specification: StreamSpecification) -> None: \"\"\"Sets the", "the Destillate (Top Stream) tops_ethane = self._flowsheet.STRM_Get_Outputs(self._tops_name, self._name_to_aspen_name.ethane) tops_propane = self._flowsheet.STRM_Get_Outputs(self._tops_name, self._name_to_aspen_name.propane) tops_isobutane", "D_Reb_Duty = self._flowsheet.BLK_Get_Reboiler_Duty() D_Col_Diameter = self._flowsheet.BLK_Get_Column_Diameter() D_Specifications = ColumnOutputSpecification(condensor_duty=D_Cond_Duty, reboiler_duty=D_Reb_Duty, diameter=D_Col_Diameter) return D_Specifications", "(Top Stream) tops_ethane = self._flowsheet.STRM_Get_Outputs(self._tops_name, self._name_to_aspen_name.ethane) tops_propane = self._flowsheet.STRM_Get_Outputs(self._tops_name, self._name_to_aspen_name.propane) tops_isobutane = self._flowsheet.STRM_Get_Outputs(self._tops_name,", "# Getting the physical values of Top streams bots_temperature = self._flowsheet.STRM_Get_Temperature(self._bottoms_name) bots_pressure =", "None: self._flowsheet.BLK_NumberOfStages(column_specification.n_stages) self._flowsheet.BLK_FeedLocation(column_specification.feed_stage_location) self._flowsheet.BLK_Pressure(column_specification.condensor_pressure) self._flowsheet.BLK_RefluxRatio(column_specification.reflux_ratio) self._flowsheet.BLK_ReboilerRatio(column_specification.reboil_ratio) def solve_flowsheet(self) -> bool: self._flowsheet.Run() def get_column_cost(self,", "= \"S3\" self._name_to_aspen_name = PerCompoundProperty(ethane=\"ETHANE\", propane=\"PROPANE\", isobutane=\"I-BUTANE\", n_butane=\"N-BUTANE\", isopentane=\"I-PENTAN\", n_pentane=\"N-PENTAN\") def set_input_stream_specification(self, stream_specification:", "Tubulating the Results of the Bottom Stream bots_specifications = StreamSpecification(temperature=bots_temperature, pressure=bots_pressure, molar_flows=PerCompoundProperty(ethane=bots_ethane, propane=bots_propane,", "stream_specification.molar_flows.isopentane) self._flowsheet.STRM_Flowrate(self._feed_name, self._name_to_aspen_name.n_pentane, stream_specification.molar_flows.n_pentane) def get_output_stream_specifications(self) -> Tuple[StreamSpecification, StreamSpecification]: # 
Getting the physical", "physical values of Top streams bots_temperature = self._flowsheet.STRM_Get_Temperature(self._bottoms_name) bots_pressure = self._flowsheet.STRM_Get_Pressure(self._bottoms_name) # Acquiring", "-> float: \"\"\"Calculates the value (per year) of a stream.\"\"\" raise NotImplementedError def", "= self._flowsheet.BLK_Get_Reboiler_Duty() D_Col_Diameter = self._flowsheet.BLK_Get_Column_Diameter() D_Specifications = ColumnOutputSpecification(condensor_duty=D_Cond_Duty, reboiler_duty=D_Reb_Duty, diameter=D_Col_Diameter) return D_Specifications def", "self._name_to_aspen_name.propane) tops_isobutane = self._flowsheet.STRM_Get_Outputs(self._tops_name, self._name_to_aspen_name.isobutane) tops_n_butane = self._flowsheet.STRM_Get_Outputs(self._tops_name, self._name_to_aspen_name.n_butane) tops_isopentane = self._flowsheet.STRM_Get_Outputs(self._tops_name, self._name_to_aspen_name.isopentane)", "self._name_to_aspen_name.isopentane) bots_n_pentane = self._flowsheet.STRM_Get_Outputs(self._bottoms_name, self._name_to_aspen_name.n_pentane) # Tubulating the Results of the Bottom Stream", "molar_flows=PerCompoundProperty(ethane=bots_ethane, propane=bots_propane, isobutane=bots_isobutane, n_butane=bots_n_butane, isopentane=bots_isopentane, n_pentane=bots_n_pentane)) return tops_specifications, bots_specifications def get_simulated_column_properties(self) -> ColumnOutputSpecification:", "their respective \"slot\" tops_specifications = StreamSpecification(temperature=tops_temperature, pressure=tops_pressure, molar_flows=PerCompoundProperty(ethane=tops_ethane, propane=tops_propane, isobutane=tops_isobutane, n_butane=tops_n_butane, isopentane=tops_isopentane, n_pentane=tops_n_pentane))", "NotImplementedError def stream_is_product(self, stream_specification: StreamSpecification, product_specification: ProductSpecification) -> bool: \"\"\"Checks whether a stream", "isopentane=\"I-PENTAN\", n_pentane=\"N-PENTAN\") def set_input_stream_specification(self, stream_specification: StreamSpecification) -> None: \"\"\"Sets the input stream to", "None: \"\"\"Sets the input stream to a column to fit the stream specification\"\"\"", "streams tops_temperature = self._flowsheet.STRM_Get_Temperature(self._tops_name) tops_pressure = self._flowsheet.STRM_Get_Pressure(self._tops_name) # Acquiring the outputs out of", "of the Destillate (Top Stream) tops_ethane = self._flowsheet.STRM_Get_Outputs(self._tops_name, self._name_to_aspen_name.ethane) tops_propane = self._flowsheet.STRM_Get_Outputs(self._tops_name, self._name_to_aspen_name.propane)", "Thermodynamic Properties self._flowsheet.STRM_Temperature(self._feed_name, stream_specification.temperature) self._flowsheet.STRM_Pressure(self._feed_name, stream_specification.pressure) # Defining the Stream Composition for the", "tops_ethane = self._flowsheet.STRM_Get_Outputs(self._tops_name, self._name_to_aspen_name.ethane) tops_propane = self._flowsheet.STRM_Get_Outputs(self._tops_name, self._name_to_aspen_name.propane) tops_isobutane = self._flowsheet.STRM_Get_Outputs(self._tops_name, self._name_to_aspen_name.isobutane) tops_n_butane", "fit the stream specification\"\"\" # Defining the Thermodynamic Properties self._flowsheet.STRM_Temperature(self._feed_name, stream_specification.temperature) self._flowsheet.STRM_Pressure(self._feed_name, stream_specification.pressure)", "self._flowsheet.BLK_Get_Pressure() # D_Reflux_Ratio = self._flowsheet.BLK_Get_RefluxRatio() # D_Reboiler_Ratio = self._flowsheet.BLK_Get_ReboilerRatio() D_Cond_Duty = 
self._flowsheet.BLK_Get_Condenser_Duty() D_Reb_Duty", "self._flowsheet.BLK_Get_Reboiler_Duty() D_Col_Diameter = self._flowsheet.BLK_Get_Column_Diameter() D_Specifications = ColumnOutputSpecification(condensor_duty=D_Cond_Duty, reboiler_duty=D_Reb_Duty, diameter=D_Col_Diameter) return D_Specifications def set_column_specification(self,", "\"\"\"Checks whether a stream meets the product specification.\"\"\" raise NotImplementedError if __name__ ==", "self._flowsheet.STRM_Get_Outputs(self._tops_name, self._name_to_aspen_name.propane) tops_isobutane = self._flowsheet.STRM_Get_Outputs(self._tops_name, self._name_to_aspen_name.isobutane) tops_n_butane = self._flowsheet.STRM_Get_Outputs(self._tops_name, self._name_to_aspen_name.n_butane) tops_isopentane = self._flowsheet.STRM_Get_Outputs(self._tops_name,", "for the Feed self._flowsheet.STRM_Flowrate(self._feed_name, self._name_to_aspen_name.ethane, stream_specification.molar_flows.ethane) self._flowsheet.STRM_Flowrate(self._feed_name, self._name_to_aspen_name.propane, stream_specification.molar_flows.propane) self._flowsheet.STRM_Flowrate(self._feed_name, self._name_to_aspen_name.isobutane, stream_specification.molar_flows.isobutane) self._flowsheet.STRM_Flowrate(self._feed_name,", "self._name_to_aspen_name.ethane, stream_specification.molar_flows.ethane) self._flowsheet.STRM_Flowrate(self._feed_name, self._name_to_aspen_name.propane, stream_specification.molar_flows.propane) self._flowsheet.STRM_Flowrate(self._feed_name, self._name_to_aspen_name.isobutane, stream_specification.molar_flows.isobutane) self._flowsheet.STRM_Flowrate(self._feed_name, self._name_to_aspen_name.n_butane, stream_specification.molar_flows.n_butane) self._flowsheet.STRM_Flowrate(self._feed_name, self._name_to_aspen_name.isopentane,", "return D_Specifications def set_column_specification(self, column_specification: ColumnInputSpecification) -> None: self._flowsheet.BLK_NumberOfStages(column_specification.n_stages) self._flowsheet.BLK_FeedLocation(column_specification.feed_stage_location) self._flowsheet.BLK_Pressure(column_specification.condensor_pressure) self._flowsheet.BLK_RefluxRatio(column_specification.reflux_ratio) self._flowsheet.BLK_ReboilerRatio(column_specification.reboil_ratio)", "n_pentane=bots_n_pentane)) return tops_specifications, bots_specifications def get_simulated_column_properties(self) -> ColumnOutputSpecification: # D_F_Location = self._flowsheet.BLK_Get_FeedLocation() #", "n_butane=\"N-BUTANE\", isopentane=\"I-PENTAN\", n_pentane=\"N-PENTAN\") def set_input_stream_specification(self, stream_specification: StreamSpecification) -> None: \"\"\"Sets the input stream", "= 'C:/Users/s2199718/Desktop/RL_PD/AspenSimulation/HydrocarbonMixture.bkp' class AspenAPI(BaseAspenDistillationAPI): def __init__(self): self._flowsheet: Simulation = Simulation(PATH=PATH, VISIBILITY=False) self._feed_name: str", "typing import Tuple from Simulation import Simulation from hydrocarbon_problem.api.api_base import BaseAspenDistillationAPI from hydrocarbon_problem.api.types", "the Feed self._flowsheet.STRM_Flowrate(self._feed_name, self._name_to_aspen_name.ethane, stream_specification.molar_flows.ethane) self._flowsheet.STRM_Flowrate(self._feed_name, self._name_to_aspen_name.propane, stream_specification.molar_flows.propane) self._flowsheet.STRM_Flowrate(self._feed_name, self._name_to_aspen_name.isobutane, stream_specification.molar_flows.isobutane) self._flowsheet.STRM_Flowrate(self._feed_name, self._name_to_aspen_name.n_butane,", "from typing import Tuple from Simulation 
import Simulation from hydrocarbon_problem.api.api_base import BaseAspenDistillationAPI from", "ColumnOutputSpecification(condensor_duty=D_Cond_Duty, reboiler_duty=D_Reb_Duty, diameter=D_Col_Diameter) return D_Specifications def set_column_specification(self, column_specification: ColumnInputSpecification) -> None: self._flowsheet.BLK_NumberOfStages(column_specification.n_stages) self._flowsheet.BLK_FeedLocation(column_specification.feed_stage_location)", "self._flowsheet.Run() def get_column_cost(self, column_specification: ColumnOutputSpecification) -> float: \"\"\"Calculates the TAC of the column.\"\"\"", "= PerCompoundProperty(ethane=\"ETHANE\", propane=\"PROPANE\", isobutane=\"I-BUTANE\", n_butane=\"N-BUTANE\", isopentane=\"I-PENTAN\", n_pentane=\"N-PENTAN\") def set_input_stream_specification(self, stream_specification: StreamSpecification) -> None:", "(Bottom Stream) bots_ethane = self._flowsheet.STRM_Get_Outputs(self._bottoms_name, self._name_to_aspen_name.ethane) bots_propane = self._flowsheet.STRM_Get_Outputs(self._bottoms_name, self._name_to_aspen_name.propane) bots_isobutane = self._flowsheet.STRM_Get_Outputs(self._bottoms_name,", "self._flowsheet.STRM_Flowrate(self._feed_name, self._name_to_aspen_name.ethane, stream_specification.molar_flows.ethane) self._flowsheet.STRM_Flowrate(self._feed_name, self._name_to_aspen_name.propane, stream_specification.molar_flows.propane) self._flowsheet.STRM_Flowrate(self._feed_name, self._name_to_aspen_name.isobutane, stream_specification.molar_flows.isobutane) self._flowsheet.STRM_Flowrate(self._feed_name, self._name_to_aspen_name.n_butane, stream_specification.molar_flows.n_butane) self._flowsheet.STRM_Flowrate(self._feed_name,", "tops_isobutane = self._flowsheet.STRM_Get_Outputs(self._tops_name, self._name_to_aspen_name.isobutane) tops_n_butane = self._flowsheet.STRM_Get_Outputs(self._tops_name, self._name_to_aspen_name.n_butane) tops_isopentane = self._flowsheet.STRM_Get_Outputs(self._tops_name, self._name_to_aspen_name.isopentane) tops_n_pentane", "str = \"S2\" self._bottoms_name: str = \"S3\" self._name_to_aspen_name = PerCompoundProperty(ethane=\"ETHANE\", propane=\"PROPANE\", isobutane=\"I-BUTANE\", n_butane=\"N-BUTANE\",", "import Simulation from hydrocarbon_problem.api.api_base import BaseAspenDistillationAPI from hydrocarbon_problem.api.types import StreamSpecification, ColumnInputSpecification, \\ ColumnOutputSpecification,", "= \"S1\" self._tops_name: str = \"S2\" self._bottoms_name: str = \"S3\" self._name_to_aspen_name = PerCompoundProperty(ethane=\"ETHANE\",", "PATH = 'C:/Users/s2199718/Desktop/RL_PD/AspenSimulation/HydrocarbonMixture.bkp' class AspenAPI(BaseAspenDistillationAPI): def __init__(self): self._flowsheet: Simulation = Simulation(PATH=PATH, VISIBILITY=False) self._feed_name:", "'C:/Users/s2199718/Desktop/RL_PD/AspenSimulation/HydrocarbonMixture.bkp' class AspenAPI(BaseAspenDistillationAPI): def __init__(self): self._flowsheet: Simulation = Simulation(PATH=PATH, VISIBILITY=False) self._feed_name: str =", "Acquiring the outputs out of the Bottom (Bottom Stream) bots_ethane = self._flowsheet.STRM_Get_Outputs(self._bottoms_name, self._name_to_aspen_name.ethane)", "self._name_to_aspen_name.n_pentane) # Passing all the variables to their respective \"slot\" tops_specifications = StreamSpecification(temperature=tops_temperature,", "self._flowsheet.BLK_Get_Column_Diameter() D_Specifications = ColumnOutputSpecification(condensor_duty=D_Cond_Duty, reboiler_duty=D_Reb_Duty, diameter=D_Col_Diameter) return D_Specifications def 
from typing import Tuple

from Simulation import Simulation
from hydrocarbon_problem.api.api_base import BaseAspenDistillationAPI
from hydrocarbon_problem.api.types import StreamSpecification, ColumnInputSpecification, \
    ColumnOutputSpecification, ProductSpecification, PerCompoundProperty

PATH = 'C:/Users/s2199718/Desktop/RL_PD/AspenSimulation/HydrocarbonMixture.bkp'


class AspenAPI(BaseAspenDistillationAPI):
    def __init__(self):
        self._flowsheet: Simulation = Simulation(PATH=PATH, VISIBILITY=False)
        self._feed_name: str = "S1"
        self._tops_name: str = "S2"
        self._bottoms_name: str = "S3"
        self._name_to_aspen_name = PerCompoundProperty(ethane="ETHANE", propane="PROPANE",
                                                       isobutane="I-BUTANE", n_butane="N-BUTANE",
                                                       isopentane="I-PENTAN", n_pentane="N-PENTAN")

    def set_input_stream_specification(self, stream_specification: StreamSpecification) -> None:
        """Sets the input stream to a column to fit the stream specification."""
        # Defining the thermodynamic properties
        self._flowsheet.STRM_Temperature(self._feed_name, stream_specification.temperature)
        self._flowsheet.STRM_Pressure(self._feed_name, stream_specification.pressure)
        # Defining the stream composition for the feed
        self._flowsheet.STRM_Flowrate(self._feed_name, self._name_to_aspen_name.ethane, stream_specification.molar_flows.ethane)
        self._flowsheet.STRM_Flowrate(self._feed_name, self._name_to_aspen_name.propane, stream_specification.molar_flows.propane)
        self._flowsheet.STRM_Flowrate(self._feed_name, self._name_to_aspen_name.isobutane, stream_specification.molar_flows.isobutane)
        self._flowsheet.STRM_Flowrate(self._feed_name, self._name_to_aspen_name.n_butane, stream_specification.molar_flows.n_butane)
        self._flowsheet.STRM_Flowrate(self._feed_name, self._name_to_aspen_name.isopentane, stream_specification.molar_flows.isopentane)
        self._flowsheet.STRM_Flowrate(self._feed_name, self._name_to_aspen_name.n_pentane, stream_specification.molar_flows.n_pentane)

    def get_output_stream_specifications(self) -> Tuple[StreamSpecification, StreamSpecification]:
        # Getting the physical values of the top stream
        tops_temperature = self._flowsheet.STRM_Get_Temperature(self._tops_name)
        tops_pressure = self._flowsheet.STRM_Get_Pressure(self._tops_name)
        # Acquiring the outputs of the distillate (top stream)
        tops_ethane = self._flowsheet.STRM_Get_Outputs(self._tops_name, self._name_to_aspen_name.ethane)
        tops_propane = self._flowsheet.STRM_Get_Outputs(self._tops_name, self._name_to_aspen_name.propane)
        tops_isobutane = self._flowsheet.STRM_Get_Outputs(self._tops_name, self._name_to_aspen_name.isobutane)
        tops_n_butane = self._flowsheet.STRM_Get_Outputs(self._tops_name, self._name_to_aspen_name.n_butane)
        tops_isopentane = self._flowsheet.STRM_Get_Outputs(self._tops_name, self._name_to_aspen_name.isopentane)
        tops_n_pentane = self._flowsheet.STRM_Get_Outputs(self._tops_name, self._name_to_aspen_name.n_pentane)
        # Passing all the variables to their respective "slot"
        tops_specifications = StreamSpecification(temperature=tops_temperature, pressure=tops_pressure,
                                                  molar_flows=PerCompoundProperty(ethane=tops_ethane,
                                                                                  propane=tops_propane,
                                                                                  isobutane=tops_isobutane,
                                                                                  n_butane=tops_n_butane,
                                                                                  isopentane=tops_isopentane,
                                                                                  n_pentane=tops_n_pentane))
        # Getting the physical values of the bottom stream
        bots_temperature = self._flowsheet.STRM_Get_Temperature(self._bottoms_name)
        bots_pressure = self._flowsheet.STRM_Get_Pressure(self._bottoms_name)
        # Acquiring the outputs of the bottoms (bottom stream)
        bots_ethane = self._flowsheet.STRM_Get_Outputs(self._bottoms_name, self._name_to_aspen_name.ethane)
        bots_propane = self._flowsheet.STRM_Get_Outputs(self._bottoms_name, self._name_to_aspen_name.propane)
        bots_isobutane = self._flowsheet.STRM_Get_Outputs(self._bottoms_name, self._name_to_aspen_name.isobutane)
        bots_n_butane = self._flowsheet.STRM_Get_Outputs(self._bottoms_name, self._name_to_aspen_name.n_butane)
        bots_isopentane = self._flowsheet.STRM_Get_Outputs(self._bottoms_name, self._name_to_aspen_name.isopentane)
        bots_n_pentane = self._flowsheet.STRM_Get_Outputs(self._bottoms_name, self._name_to_aspen_name.n_pentane)
        # Tabulating the results of the bottom stream
        bots_specifications = StreamSpecification(temperature=bots_temperature, pressure=bots_pressure,
                                                  molar_flows=PerCompoundProperty(ethane=bots_ethane,
                                                                                  propane=bots_propane,
                                                                                  isobutane=bots_isobutane,
                                                                                  n_butane=bots_n_butane,
                                                                                  isopentane=bots_isopentane,
                                                                                  n_pentane=bots_n_pentane))
        return tops_specifications, bots_specifications

    def get_simulated_column_properties(self) -> ColumnOutputSpecification:
        # D_F_Location = self._flowsheet.BLK_Get_FeedLocation()
        # D_Pressure = self._flowsheet.BLK_Get_Pressure()
        # D_Reflux_Ratio = self._flowsheet.BLK_Get_RefluxRatio()
        # D_Reboiler_Ratio = self._flowsheet.BLK_Get_ReboilerRatio()
        D_Cond_Duty = self._flowsheet.BLK_Get_Condenser_Duty()
        D_Reb_Duty = self._flowsheet.BLK_Get_Reboiler_Duty()
        D_Col_Diameter = self._flowsheet.BLK_Get_Column_Diameter()
        D_Specifications = ColumnOutputSpecification(condensor_duty=D_Cond_Duty, reboiler_duty=D_Reb_Duty,
                                                     diameter=D_Col_Diameter)
        return D_Specifications

    def set_column_specification(self, column_specification: ColumnInputSpecification) -> None:
        self._flowsheet.BLK_NumberOfStages(column_specification.n_stages)
        self._flowsheet.BLK_FeedLocation(column_specification.feed_stage_location)
        self._flowsheet.BLK_Pressure(column_specification.condensor_pressure)
        self._flowsheet.BLK_RefluxRatio(column_specification.reflux_ratio)
        self._flowsheet.BLK_ReboilerRatio(column_specification.reboil_ratio)

    def solve_flowsheet(self) -> bool:
        self._flowsheet.Run()

    def get_column_cost(self, column_specification: ColumnOutputSpecification) -> float:
        """Calculates the TAC of the column."""
        raise NotImplementedError

    def get_stream_value(self, stream_specification: StreamSpecification) -> float:
        """Calculates the value (per year) of a stream."""
        raise NotImplementedError

    def stream_is_product(self, stream_specification: StreamSpecification,
                          product_specification: ProductSpecification) -> bool:
        """Checks whether a stream meets the product specification."""
        raise NotImplementedError


if __name__ == '__main__':
    from hydrocarbon_problem.api.api_tests import test_api

    aspen_api = AspenAPI()
    test_api(aspen_api)
[ "unittest from rssparser.convert_module import rss_items_to_list, rss_to_dict class TestConvertModule(unittest.TestCase): def test_rss_items_to_list(self): self.assertEqual(rss_items_to_list([{}]), [{}]) def", "def test_rss_items_to_list(self): self.assertEqual(rss_items_to_list([{}]), [{}]) def test_rss_to_dict(self): self.assertEqual(rss_to_dict([{}]), {'item1': {}}) if __name__ == '__main__':", "<filename>rssparser/tests/test_convert_module.py import unittest from rssparser.convert_module import rss_items_to_list, rss_to_dict class TestConvertModule(unittest.TestCase): def test_rss_items_to_list(self): self.assertEqual(rss_items_to_list([{}]),", "import unittest from rssparser.convert_module import rss_items_to_list, rss_to_dict class TestConvertModule(unittest.TestCase): def test_rss_items_to_list(self): self.assertEqual(rss_items_to_list([{}]), [{}])", "rss_items_to_list, rss_to_dict class TestConvertModule(unittest.TestCase): def test_rss_items_to_list(self): self.assertEqual(rss_items_to_list([{}]), [{}]) def test_rss_to_dict(self): self.assertEqual(rss_to_dict([{}]), {'item1': {}})", "import rss_items_to_list, rss_to_dict class TestConvertModule(unittest.TestCase): def test_rss_items_to_list(self): self.assertEqual(rss_items_to_list([{}]), [{}]) def test_rss_to_dict(self): self.assertEqual(rss_to_dict([{}]), {'item1':", "test_rss_items_to_list(self): self.assertEqual(rss_items_to_list([{}]), [{}]) def test_rss_to_dict(self): self.assertEqual(rss_to_dict([{}]), {'item1': {}}) if __name__ == '__main__': unittest.main()", "rss_to_dict class TestConvertModule(unittest.TestCase): def test_rss_items_to_list(self): self.assertEqual(rss_items_to_list([{}]), [{}]) def test_rss_to_dict(self): self.assertEqual(rss_to_dict([{}]), {'item1': {}}) if", "rssparser.convert_module import rss_items_to_list, rss_to_dict class TestConvertModule(unittest.TestCase): def test_rss_items_to_list(self): self.assertEqual(rss_items_to_list([{}]), [{}]) def test_rss_to_dict(self): self.assertEqual(rss_to_dict([{}]),", "from rssparser.convert_module import rss_items_to_list, rss_to_dict class TestConvertModule(unittest.TestCase): def test_rss_items_to_list(self): self.assertEqual(rss_items_to_list([{}]), [{}]) def test_rss_to_dict(self):", "TestConvertModule(unittest.TestCase): def test_rss_items_to_list(self): self.assertEqual(rss_items_to_list([{}]), [{}]) def test_rss_to_dict(self): self.assertEqual(rss_to_dict([{}]), {'item1': {}}) if __name__ ==", "class TestConvertModule(unittest.TestCase): def test_rss_items_to_list(self): self.assertEqual(rss_items_to_list([{}]), [{}]) def test_rss_to_dict(self): self.assertEqual(rss_to_dict([{}]), {'item1': {}}) if __name__" ]
[ "uniform random from -n to n given output is multiplied by n \"\"\"", "derivative: x[x <= 0] = 0 x[x > 0] = 1 return x", "import random def ReLU(x, derivative=False): \"\"\" ReLU function with corresponding derivative \"\"\" if", "ReLU \"\"\" return random.uniform(0.005, 0.2) def uniform_random(): \"\"\" Generic uniform random from -n", "0 x[x > 0] = 1 return x x[x < 0] = 0", "return x def ReLU_uniform_random(): \"\"\" Ideal weight starting values for ReLU \"\"\" return", "with corresponding derivative \"\"\" if derivative: x[x <= 0] = 0 x[x >", "0.2) def uniform_random(): \"\"\" Generic uniform random from -n to n given output", "weight starting values for ReLU \"\"\" return random.uniform(0.005, 0.2) def uniform_random(): \"\"\" Generic", "= 0 x[x > 0] = 1 return x x[x < 0] =", "0] = 0 x[x > 0] = 1 return x x[x < 0]", "-n to n given output is multiplied by n \"\"\" return random.uniform(-1, 1)", "random def ReLU(x, derivative=False): \"\"\" ReLU function with corresponding derivative \"\"\" if derivative:", "0] = 1 return x x[x < 0] = 0 return x def", "from -n to n given output is multiplied by n \"\"\" return random.uniform(-1,", "corresponding derivative \"\"\" if derivative: x[x <= 0] = 0 x[x > 0]", "0 return x def ReLU_uniform_random(): \"\"\" Ideal weight starting values for ReLU \"\"\"", "def ReLU(x, derivative=False): \"\"\" ReLU function with corresponding derivative \"\"\" if derivative: x[x", "function with corresponding derivative \"\"\" if derivative: x[x <= 0] = 0 x[x", "= 0 return x def ReLU_uniform_random(): \"\"\" Ideal weight starting values for ReLU", "def ReLU_uniform_random(): \"\"\" Ideal weight starting values for ReLU \"\"\" return random.uniform(0.005, 0.2)", "\"\"\" Generic uniform random from -n to n given output is multiplied by", "> 0] = 1 return x x[x < 0] = 0 return x", "\"\"\" ReLU function with corresponding derivative \"\"\" if derivative: x[x <= 0] =", "x[x <= 0] = 0 x[x > 0] = 1 return x x[x", "values for ReLU \"\"\" return random.uniform(0.005, 0.2) def uniform_random(): \"\"\" Generic uniform random", "ReLU(x, derivative=False): \"\"\" ReLU function with corresponding derivative \"\"\" if derivative: x[x <=", "x[x > 0] = 1 return x x[x < 0] = 0 return", "x x[x < 0] = 0 return x def ReLU_uniform_random(): \"\"\" Ideal weight", "Ideal weight starting values for ReLU \"\"\" return random.uniform(0.005, 0.2) def uniform_random(): \"\"\"", "x[x < 0] = 0 return x def ReLU_uniform_random(): \"\"\" Ideal weight starting", "Generic uniform random from -n to n given output is multiplied by n", "derivative \"\"\" if derivative: x[x <= 0] = 0 x[x > 0] =", "def uniform_random(): \"\"\" Generic uniform random from -n to n given output is", "ReLU function with corresponding derivative \"\"\" if derivative: x[x <= 0] = 0", "uniform_random(): \"\"\" Generic uniform random from -n to n given output is multiplied", "derivative=False): \"\"\" ReLU function with corresponding derivative \"\"\" if derivative: x[x <= 0]", "for ReLU \"\"\" return random.uniform(0.005, 0.2) def uniform_random(): \"\"\" Generic uniform random from", "< 0] = 0 return x def ReLU_uniform_random(): \"\"\" Ideal weight starting values", "starting values for ReLU \"\"\" return random.uniform(0.005, 0.2) def uniform_random(): \"\"\" Generic uniform", "= 1 return x x[x < 0] = 0 return x def ReLU_uniform_random():", "return x x[x < 0] = 0 return x def ReLU_uniform_random(): \"\"\" Ideal", "\"\"\" Ideal weight starting values for ReLU \"\"\" return random.uniform(0.005, 0.2) def uniform_random():", 
"random.uniform(0.005, 0.2) def uniform_random(): \"\"\" Generic uniform random from -n to n given", "return random.uniform(0.005, 0.2) def uniform_random(): \"\"\" Generic uniform random from -n to n", "1 return x x[x < 0] = 0 return x def ReLU_uniform_random(): \"\"\"", "\"\"\" if derivative: x[x <= 0] = 0 x[x > 0] = 1", "ReLU_uniform_random(): \"\"\" Ideal weight starting values for ReLU \"\"\" return random.uniform(0.005, 0.2) def", "random from -n to n given output is multiplied by n \"\"\" return", "\"\"\" return random.uniform(0.005, 0.2) def uniform_random(): \"\"\" Generic uniform random from -n to", "<= 0] = 0 x[x > 0] = 1 return x x[x <", "if derivative: x[x <= 0] = 0 x[x > 0] = 1 return", "0] = 0 return x def ReLU_uniform_random(): \"\"\" Ideal weight starting values for", "x def ReLU_uniform_random(): \"\"\" Ideal weight starting values for ReLU \"\"\" return random.uniform(0.005," ]
[ "**kwargs): super().__init__(3, **kwargs) self.add_class(\"ipypdf-pdf-canvas\") self.bboxes = [] self.bg_layer = self[0] self.fixed_layer = self[1]", "clear(self): self.fixed_layer.clear_rect(0, 0, self.width, self.height) def mouse_down(self, x, y): self.mouse_is_down = True self.rect", "on the canvas. \"\"\" x1, y1, x2, y2 = self.rect if not coords", "x, y): self.mouse_is_down = False self.animated_layer.clear_rect(0, 0, self.width, self.height) self.fixed_layer.stroke_rect(*self.xywh()) self.update() def add_image(self,", "self.update() def add_image(self, img): \":param img: raw byte data of image\" self.bg_layer.draw_image(img) self.update()", "0, self.width, self.height) def mouse_down(self, x, y): self.mouse_is_down = True self.rect = [x,", "the canvas. \"\"\" x1, y1, x2, y2 = self.rect if not coords else", "y1, x2, y2 = self.rect if not coords else coords x = min(x1,", "coords x = min(x1, x2) y = min(y1, y2) w = abs(x2 -", "self.animated_layer.stroke_rect(*self.xywh()) self.add_class(\"ipypdf-pdf-canvas\") def clear(self): self.fixed_layer.clear_rect(0, 0, self.width, self.height) def mouse_down(self, x, y): self.mouse_is_down", "self.rect = [x, y, x + 1, y + 1] self.draw_rect() def mouse_move(self,", "img): \":param img: raw byte data of image\" self.bg_layer.draw_image(img) self.update() def draw_many(self, rects):", "} class PdfCanvas(MultiCanvas): def __init__(self, **kwargs): super().__init__(3, **kwargs) self.add_class(\"ipypdf-pdf-canvas\") self.bboxes = [] self.bg_layer", "{ \"section\": {\"color\": \"blue\"}, \"text\": {\"color\": \"black\"}, \"image\": {\"color\": \"red\"}, \"pdf\": {\"color\": \"black\"},", "Unused \"table\": {\"color\": \"green\"}, } class PdfCanvas(MultiCanvas): def __init__(self, **kwargs): super().__init__(3, **kwargs) self.add_class(\"ipypdf-pdf-canvas\")", "is needed to draw the box on the canvas. 
\"\"\" x1, y1, x2,", "know why, but this is needed in order to allow animated_layer to update", "ipyevents (and PIL) uses xyxy, so conversion is needed to draw the box", "= False def update(self): \"\"\" I don't know why, but this is needed", "uses xyxy, so conversion is needed to draw the box on the canvas.", "min(x1, x2) y = min(y1, y2) w = abs(x2 - x1) h =", "\"blue\"}, \"text\": {\"color\": \"black\"}, \"image\": {\"color\": \"red\"}, \"pdf\": {\"color\": \"black\"}, # Unused \"folder\":", "__init__(self, **kwargs): super().__init__(3, **kwargs) self.add_class(\"ipypdf-pdf-canvas\") self.bboxes = [] self.bg_layer = self[0] self.fixed_layer =", "allow animated_layer to update correctly after making a change to any other layer", "abs(y2 - y1) return x, y, w, h def draw_rect(self): self.animated_layer.clear_rect(0, 0, self.width,", "self.draw_rect() def mouse_up(self, x, y): self.mouse_is_down = False self.animated_layer.clear_rect(0, 0, self.width, self.height) self.fixed_layer.stroke_rect(*self.xywh())", "rects): self.clear() for coords, _type in rects: self.fixed_layer.stroke_style = CANVAS_TYPE_KWARGS[_type][\"color\"] self.fixed_layer.stroke_rect(*self.xywh(coords)) self.update() def", "def mouse_up(self, x, y): self.mouse_is_down = False self.animated_layer.clear_rect(0, 0, self.width, self.height) self.fixed_layer.stroke_rect(*self.xywh()) self.update()", "1, y + 1] self.draw_rect() def mouse_move(self, x, y): if self.mouse_is_down: self.rect[2] =", "to update correctly after making a change to any other layer \"\"\" self._canvases", "self.rect[3] = y self.draw_rect() def mouse_up(self, x, y): self.mouse_is_down = False self.animated_layer.clear_rect(0, 0,", "self.width, self.height) self.animated_layer.stroke_rect(*self.xywh()) self.add_class(\"ipypdf-pdf-canvas\") def clear(self): self.fixed_layer.clear_rect(0, 0, self.width, self.height) def mouse_down(self, x,", "byte data of image\" self.bg_layer.draw_image(img) self.update() def draw_many(self, rects): self.clear() for coords, _type", "needed to draw the box on the canvas. 
\"\"\" x1, y1, x2, y2", "self.fixed_layer.clear_rect(0, 0, self.width, self.height) def mouse_down(self, x, y): self.mouse_is_down = True self.rect =", "x = min(x1, x2) y = min(y1, y2) w = abs(x2 - x1)", "\"table\": {\"color\": \"green\"}, } class PdfCanvas(MultiCanvas): def __init__(self, **kwargs): super().__init__(3, **kwargs) self.add_class(\"ipypdf-pdf-canvas\") self.bboxes", "\"text\": {\"color\": \"black\"}, \"image\": {\"color\": \"red\"}, \"pdf\": {\"color\": \"black\"}, # Unused \"folder\": {\"color\":", "\"black\"}, # Unused \"table\": {\"color\": \"green\"}, } class PdfCanvas(MultiCanvas): def __init__(self, **kwargs): super().__init__(3,", "self.bg_layer = self[0] self.fixed_layer = self[1] self.animated_layer = self[2] self.animated_layer.on_mouse_down(self.mouse_down) self.animated_layer.on_mouse_move(self.mouse_move) self.animated_layer.on_mouse_up(self.mouse_up) self.rect", "coords=None): \"\"\" ipycanvas requires xywh coords, but ipyevents (and PIL) uses xyxy, so", "self.rect[2] = x self.rect[3] = y self.draw_rect() def mouse_up(self, x, y): self.mouse_is_down =", "to allow animated_layer to update correctly after making a change to any other", "other layer \"\"\" self._canvases = [self.bg_layer, self.fixed_layer, self.animated_layer] def xywh(self, coords=None): \"\"\" ipycanvas", "abs(x2 - x1) h = abs(y2 - y1) return x, y, w, h", "# Unused \"folder\": {\"color\": \"black\"}, # Unused \"table\": {\"color\": \"green\"}, } class PdfCanvas(MultiCanvas):", "= abs(y2 - y1) return x, y, w, h def draw_rect(self): self.animated_layer.clear_rect(0, 0,", "\"\"\" self._canvases = [self.bg_layer, self.fixed_layer, self.animated_layer] def xywh(self, coords=None): \"\"\" ipycanvas requires xywh", "CANVAS_TYPE_KWARGS = { \"section\": {\"color\": \"blue\"}, \"text\": {\"color\": \"black\"}, \"image\": {\"color\": \"red\"}, \"pdf\":", "any other layer \"\"\" self._canvases = [self.bg_layer, self.fixed_layer, self.animated_layer] def xywh(self, coords=None): \"\"\"", "self.clear() for coords, _type in rects: self.fixed_layer.stroke_style = CANVAS_TYPE_KWARGS[_type][\"color\"] self.fixed_layer.stroke_rect(*self.xywh(coords)) self.update() def set_type(self,", "= self.rect if not coords else coords x = min(x1, x2) y =", "= False self.animated_layer.clear_rect(0, 0, self.width, self.height) self.fixed_layer.stroke_rect(*self.xywh()) self.update() def add_image(self, img): \":param img:", "{\"color\": \"black\"}, # Unused \"folder\": {\"color\": \"black\"}, # Unused \"table\": {\"color\": \"green\"}, }", "1] self.draw_rect() def mouse_move(self, x, y): if self.mouse_is_down: self.rect[2] = x self.rect[3] =", "\"\"\" ipycanvas requires xywh coords, but ipyevents (and PIL) uses xyxy, so conversion", "so conversion is needed to draw the box on the canvas. 
\"\"\" x1,", "mouse_move(self, x, y): if self.mouse_is_down: self.rect[2] = x self.rect[3] = y self.draw_rect() def", "self.animated_layer = self[2] self.animated_layer.on_mouse_down(self.mouse_down) self.animated_layer.on_mouse_move(self.mouse_move) self.animated_layer.on_mouse_up(self.mouse_up) self.rect = None self.mouse_is_down = False def", "self.mouse_is_down: self.rect[2] = x self.rect[3] = y self.draw_rect() def mouse_up(self, x, y): self.mouse_is_down", "for coords, _type in rects: self.fixed_layer.stroke_style = CANVAS_TYPE_KWARGS[_type][\"color\"] self.fixed_layer.stroke_rect(*self.xywh(coords)) self.update() def set_type(self, _type:", "after making a change to any other layer \"\"\" self._canvases = [self.bg_layer, self.fixed_layer,", "requires xywh coords, but ipyevents (and PIL) uses xyxy, so conversion is needed", "self[0] self.fixed_layer = self[1] self.animated_layer = self[2] self.animated_layer.on_mouse_down(self.mouse_down) self.animated_layer.on_mouse_move(self.mouse_move) self.animated_layer.on_mouse_up(self.mouse_up) self.rect = None", "h = abs(y2 - y1) return x, y, w, h def draw_rect(self): self.animated_layer.clear_rect(0,", "\"black\"}, # Unused \"folder\": {\"color\": \"black\"}, # Unused \"table\": {\"color\": \"green\"}, } class", "= y self.draw_rect() def mouse_up(self, x, y): self.mouse_is_down = False self.animated_layer.clear_rect(0, 0, self.width,", "PIL) uses xyxy, so conversion is needed to draw the box on the", "from ipycanvas import MultiCanvas CANVAS_TYPE_KWARGS = { \"section\": {\"color\": \"blue\"}, \"text\": {\"color\": \"black\"},", "draw_rect(self): self.animated_layer.clear_rect(0, 0, self.width, self.height) self.animated_layer.stroke_rect(*self.xywh()) self.add_class(\"ipypdf-pdf-canvas\") def clear(self): self.fixed_layer.clear_rect(0, 0, self.width, self.height)", "self.mouse_is_down = True self.rect = [x, y, x + 1, y + 1]", "MultiCanvas CANVAS_TYPE_KWARGS = { \"section\": {\"color\": \"blue\"}, \"text\": {\"color\": \"black\"}, \"image\": {\"color\": \"red\"},", "def mouse_down(self, x, y): self.mouse_is_down = True self.rect = [x, y, x +", "= min(y1, y2) w = abs(x2 - x1) h = abs(y2 - y1)", "conversion is needed to draw the box on the canvas. 
\"\"\" x1, y1,", "self[2] self.animated_layer.on_mouse_down(self.mouse_down) self.animated_layer.on_mouse_move(self.mouse_move) self.animated_layer.on_mouse_up(self.mouse_up) self.rect = None self.mouse_is_down = False def update(self): \"\"\"", "def clear(self): self.fixed_layer.clear_rect(0, 0, self.width, self.height) def mouse_down(self, x, y): self.mouse_is_down = True", "y + 1] self.draw_rect() def mouse_move(self, x, y): if self.mouse_is_down: self.rect[2] = x", "if self.mouse_is_down: self.rect[2] = x self.rect[3] = y self.draw_rect() def mouse_up(self, x, y):", "self.fixed_layer.stroke_rect(*self.xywh()) self.update() def add_image(self, img): \":param img: raw byte data of image\" self.bg_layer.draw_image(img)", "mouse_down(self, x, y): self.mouse_is_down = True self.rect = [x, y, x + 1,", "= True self.rect = [x, y, x + 1, y + 1] self.draw_rect()", "data of image\" self.bg_layer.draw_image(img) self.update() def draw_many(self, rects): self.clear() for coords, _type in", "_type in rects: self.fixed_layer.stroke_style = CANVAS_TYPE_KWARGS[_type][\"color\"] self.fixed_layer.stroke_rect(*self.xywh(coords)) self.update() def set_type(self, _type: str): self._type", "\":param img: raw byte data of image\" self.bg_layer.draw_image(img) self.update() def draw_many(self, rects): self.clear()", "self.fixed_layer.stroke_rect(*self.xywh(coords)) self.update() def set_type(self, _type: str): self._type = _type self.fixed_layer.stroke_style = CANVAS_TYPE_KWARGS[_type][\"color\"] self.animated_layer.stroke_style", "but this is needed in order to allow animated_layer to update correctly after", "\"red\"}, \"pdf\": {\"color\": \"black\"}, # Unused \"folder\": {\"color\": \"black\"}, # Unused \"table\": {\"color\":", "to any other layer \"\"\" self._canvases = [self.bg_layer, self.fixed_layer, self.animated_layer] def xywh(self, coords=None):", "True self.rect = [x, y, x + 1, y + 1] self.draw_rect() def", "self.fixed_layer = self[1] self.animated_layer = self[2] self.animated_layer.on_mouse_down(self.mouse_down) self.animated_layer.on_mouse_move(self.mouse_move) self.animated_layer.on_mouse_up(self.mouse_up) self.rect = None self.mouse_is_down", "min(y1, y2) w = abs(x2 - x1) h = abs(y2 - y1) return", "self.draw_rect() def mouse_move(self, x, y): if self.mouse_is_down: self.rect[2] = x self.rect[3] = y", "don't know why, but this is needed in order to allow animated_layer to", "to draw the box on the canvas. 
\"\"\" x1, y1, x2, y2 =", "0, self.width, self.height) self.animated_layer.stroke_rect(*self.xywh()) self.add_class(\"ipypdf-pdf-canvas\") def clear(self): self.fixed_layer.clear_rect(0, 0, self.width, self.height) def mouse_down(self,", "def mouse_move(self, x, y): if self.mouse_is_down: self.rect[2] = x self.rect[3] = y self.draw_rect()", "y = min(y1, y2) w = abs(x2 - x1) h = abs(y2 -", "x + 1, y + 1] self.draw_rect() def mouse_move(self, x, y): if self.mouse_is_down:", "= { \"section\": {\"color\": \"blue\"}, \"text\": {\"color\": \"black\"}, \"image\": {\"color\": \"red\"}, \"pdf\": {\"color\":", "self.height) def mouse_down(self, x, y): self.mouse_is_down = True self.rect = [x, y, x", "needed in order to allow animated_layer to update correctly after making a change", "None self.mouse_is_down = False def update(self): \"\"\" I don't know why, but this", "coords, but ipyevents (and PIL) uses xyxy, so conversion is needed to draw", "def xywh(self, coords=None): \"\"\" ipycanvas requires xywh coords, but ipyevents (and PIL) uses", "[x, y, x + 1, y + 1] self.draw_rect() def mouse_move(self, x, y):", "= [] self.bg_layer = self[0] self.fixed_layer = self[1] self.animated_layer = self[2] self.animated_layer.on_mouse_down(self.mouse_down) self.animated_layer.on_mouse_move(self.mouse_move)", "= min(x1, x2) y = min(y1, y2) w = abs(x2 - x1) h", "in order to allow animated_layer to update correctly after making a change to", "if not coords else coords x = min(x1, x2) y = min(y1, y2)", "making a change to any other layer \"\"\" self._canvases = [self.bg_layer, self.fixed_layer, self.animated_layer]", "change to any other layer \"\"\" self._canvases = [self.bg_layer, self.fixed_layer, self.animated_layer] def xywh(self,", "class PdfCanvas(MultiCanvas): def __init__(self, **kwargs): super().__init__(3, **kwargs) self.add_class(\"ipypdf-pdf-canvas\") self.bboxes = [] self.bg_layer =", "self.update() def draw_many(self, rects): self.clear() for coords, _type in rects: self.fixed_layer.stroke_style = CANVAS_TYPE_KWARGS[_type][\"color\"]", "\"image\": {\"color\": \"red\"}, \"pdf\": {\"color\": \"black\"}, # Unused \"folder\": {\"color\": \"black\"}, # Unused", "def draw_many(self, rects): self.clear() for coords, _type in rects: self.fixed_layer.stroke_style = CANVAS_TYPE_KWARGS[_type][\"color\"] self.fixed_layer.stroke_rect(*self.xywh(coords))", "= abs(x2 - x1) h = abs(y2 - y1) return x, y, w,", "this is needed in order to allow animated_layer to update correctly after making", "layer \"\"\" self._canvases = [self.bg_layer, self.fixed_layer, self.animated_layer] def xywh(self, coords=None): \"\"\" ipycanvas requires", "y2) w = abs(x2 - x1) h = abs(y2 - y1) return x,", "h def draw_rect(self): self.animated_layer.clear_rect(0, 0, self.width, self.height) self.animated_layer.stroke_rect(*self.xywh()) self.add_class(\"ipypdf-pdf-canvas\") def clear(self): self.fixed_layer.clear_rect(0, 0,", "a change to any other layer \"\"\" self._canvases = [self.bg_layer, self.fixed_layer, self.animated_layer] def", "self.add_class(\"ipypdf-pdf-canvas\") self.bboxes = [] self.bg_layer = self[0] self.fixed_layer = self[1] self.animated_layer = self[2]", "y): self.mouse_is_down = True self.rect = [x, y, x + 1, y +", "+ 1] self.draw_rect() def mouse_move(self, x, y): if self.mouse_is_down: self.rect[2] = x self.rect[3]", "raw byte data of image\" self.bg_layer.draw_image(img) self.update() def draw_many(self, rects): self.clear() for coords,", "in rects: self.fixed_layer.stroke_style = CANVAS_TYPE_KWARGS[_type][\"color\"] 
self.fixed_layer.stroke_rect(*self.xywh(coords)) self.update() def set_type(self, _type: str): self._type =", "w, h def draw_rect(self): self.animated_layer.clear_rect(0, 0, self.width, self.height) self.animated_layer.stroke_rect(*self.xywh()) self.add_class(\"ipypdf-pdf-canvas\") def clear(self): self.fixed_layer.clear_rect(0,", "not coords else coords x = min(x1, x2) y = min(y1, y2) w", "draw_many(self, rects): self.clear() for coords, _type in rects: self.fixed_layer.stroke_style = CANVAS_TYPE_KWARGS[_type][\"color\"] self.fixed_layer.stroke_rect(*self.xywh(coords)) self.update()", "self.height) self.fixed_layer.stroke_rect(*self.xywh()) self.update() def add_image(self, img): \":param img: raw byte data of image\"", "y self.draw_rect() def mouse_up(self, x, y): self.mouse_is_down = False self.animated_layer.clear_rect(0, 0, self.width, self.height)", "self.fixed_layer.stroke_style = CANVAS_TYPE_KWARGS[_type][\"color\"] self.fixed_layer.stroke_rect(*self.xywh(coords)) self.update() def set_type(self, _type: str): self._type = _type self.fixed_layer.stroke_style", "update correctly after making a change to any other layer \"\"\" self._canvases =", "add_image(self, img): \":param img: raw byte data of image\" self.bg_layer.draw_image(img) self.update() def draw_many(self,", "y): self.mouse_is_down = False self.animated_layer.clear_rect(0, 0, self.width, self.height) self.fixed_layer.stroke_rect(*self.xywh()) self.update() def add_image(self, img):", "self.mouse_is_down = False self.animated_layer.clear_rect(0, 0, self.width, self.height) self.fixed_layer.stroke_rect(*self.xywh()) self.update() def add_image(self, img): \":param", "canvas. \"\"\" x1, y1, x2, y2 = self.rect if not coords else coords", "self.add_class(\"ipypdf-pdf-canvas\") def clear(self): self.fixed_layer.clear_rect(0, 0, self.width, self.height) def mouse_down(self, x, y): self.mouse_is_down =", "y, x + 1, y + 1] self.draw_rect() def mouse_move(self, x, y): if", "{\"color\": \"black\"}, \"image\": {\"color\": \"red\"}, \"pdf\": {\"color\": \"black\"}, # Unused \"folder\": {\"color\": \"black\"},", "x1) h = abs(y2 - y1) return x, y, w, h def draw_rect(self):", "x, y): if self.mouse_is_down: self.rect[2] = x self.rect[3] = y self.draw_rect() def mouse_up(self,", "self.animated_layer] def xywh(self, coords=None): \"\"\" ipycanvas requires xywh coords, but ipyevents (and PIL)", "self.height) self.animated_layer.stroke_rect(*self.xywh()) self.add_class(\"ipypdf-pdf-canvas\") def clear(self): self.fixed_layer.clear_rect(0, 0, self.width, self.height) def mouse_down(self, x, y):", "0, self.width, self.height) self.fixed_layer.stroke_rect(*self.xywh()) self.update() def add_image(self, img): \":param img: raw byte data", "False self.animated_layer.clear_rect(0, 0, self.width, self.height) self.fixed_layer.stroke_rect(*self.xywh()) self.update() def add_image(self, img): \":param img: raw", "= self[2] self.animated_layer.on_mouse_down(self.mouse_down) self.animated_layer.on_mouse_move(self.mouse_move) self.animated_layer.on_mouse_up(self.mouse_up) self.rect = None self.mouse_is_down = False def update(self):", "- x1) h = abs(y2 - y1) return x, y, w, h def", "{\"color\": \"black\"}, # Unused \"table\": {\"color\": \"green\"}, } class PdfCanvas(MultiCanvas): def __init__(self, **kwargs):", "self.animated_layer.on_mouse_up(self.mouse_up) self.rect = None self.mouse_is_down = False def update(self): \"\"\" I don't know", "x2, y2 = self.rect if not coords else coords x = min(x1, x2)", "= x self.rect[3] = y self.draw_rect() def 
mouse_up(self, x, y): self.mouse_is_down = False", "mouse_up(self, x, y): self.mouse_is_down = False self.animated_layer.clear_rect(0, 0, self.width, self.height) self.fixed_layer.stroke_rect(*self.xywh()) self.update() def", "def draw_rect(self): self.animated_layer.clear_rect(0, 0, self.width, self.height) self.animated_layer.stroke_rect(*self.xywh()) self.add_class(\"ipypdf-pdf-canvas\") def clear(self): self.fixed_layer.clear_rect(0, 0, self.width,", "animated_layer to update correctly after making a change to any other layer \"\"\"", "self.width, self.height) def mouse_down(self, x, y): self.mouse_is_down = True self.rect = [x, y,", "ipycanvas requires xywh coords, but ipyevents (and PIL) uses xyxy, so conversion is", "- y1) return x, y, w, h def draw_rect(self): self.animated_layer.clear_rect(0, 0, self.width, self.height)", "xyxy, so conversion is needed to draw the box on the canvas. \"\"\"", "self.animated_layer.on_mouse_move(self.mouse_move) self.animated_layer.on_mouse_up(self.mouse_up) self.rect = None self.mouse_is_down = False def update(self): \"\"\" I don't", "why, but this is needed in order to allow animated_layer to update correctly", "False def update(self): \"\"\" I don't know why, but this is needed in", "order to allow animated_layer to update correctly after making a change to any", "def add_image(self, img): \":param img: raw byte data of image\" self.bg_layer.draw_image(img) self.update() def", "y1) return x, y, w, h def draw_rect(self): self.animated_layer.clear_rect(0, 0, self.width, self.height) self.animated_layer.stroke_rect(*self.xywh())", "= self[1] self.animated_layer = self[2] self.animated_layer.on_mouse_down(self.mouse_down) self.animated_layer.on_mouse_move(self.mouse_move) self.animated_layer.on_mouse_up(self.mouse_up) self.rect = None self.mouse_is_down =", "x2) y = min(y1, y2) w = abs(x2 - x1) h = abs(y2", "return x, y, w, h def draw_rect(self): self.animated_layer.clear_rect(0, 0, self.width, self.height) self.animated_layer.stroke_rect(*self.xywh()) self.add_class(\"ipypdf-pdf-canvas\")", "{\"color\": \"blue\"}, \"text\": {\"color\": \"black\"}, \"image\": {\"color\": \"red\"}, \"pdf\": {\"color\": \"black\"}, # Unused", "update(self): \"\"\" I don't know why, but this is needed in order to", "\"green\"}, } class PdfCanvas(MultiCanvas): def __init__(self, **kwargs): super().__init__(3, **kwargs) self.add_class(\"ipypdf-pdf-canvas\") self.bboxes = []", "coords else coords x = min(x1, x2) y = min(y1, y2) w =", "is needed in order to allow animated_layer to update correctly after making a", "self.width, self.height) self.fixed_layer.stroke_rect(*self.xywh()) self.update() def add_image(self, img): \":param img: raw byte data of", "super().__init__(3, **kwargs) self.add_class(\"ipypdf-pdf-canvas\") self.bboxes = [] self.bg_layer = self[0] self.fixed_layer = self[1] self.animated_layer", "image\" self.bg_layer.draw_image(img) self.update() def draw_many(self, rects): self.clear() for coords, _type in rects: self.fixed_layer.stroke_style", "self.bg_layer.draw_image(img) self.update() def draw_many(self, rects): self.clear() for coords, _type in rects: self.fixed_layer.stroke_style =", "self.rect = None self.mouse_is_down = False def update(self): \"\"\" I don't know why,", "# Unused \"table\": {\"color\": \"green\"}, } class PdfCanvas(MultiCanvas): def __init__(self, **kwargs): super().__init__(3, **kwargs)", "\"\"\" x1, y1, x2, y2 = self.rect if not coords else coords x", "CANVAS_TYPE_KWARGS[_type][\"color\"] 
self.fixed_layer.stroke_rect(*self.xywh(coords)) self.update() def set_type(self, _type: str): self._type = _type self.fixed_layer.stroke_style = CANVAS_TYPE_KWARGS[_type][\"color\"]", "ipycanvas import MultiCanvas CANVAS_TYPE_KWARGS = { \"section\": {\"color\": \"blue\"}, \"text\": {\"color\": \"black\"}, \"image\":", "def __init__(self, **kwargs): super().__init__(3, **kwargs) self.add_class(\"ipypdf-pdf-canvas\") self.bboxes = [] self.bg_layer = self[0] self.fixed_layer", "\"black\"}, \"image\": {\"color\": \"red\"}, \"pdf\": {\"color\": \"black\"}, # Unused \"folder\": {\"color\": \"black\"}, #", "the box on the canvas. \"\"\" x1, y1, x2, y2 = self.rect if", "def set_type(self, _type: str): self._type = _type self.fixed_layer.stroke_style = CANVAS_TYPE_KWARGS[_type][\"color\"] self.animated_layer.stroke_style = CANVAS_TYPE_KWARGS[_type][\"color\"]", "def update(self): \"\"\" I don't know why, but this is needed in order", "x, y): self.mouse_is_down = True self.rect = [x, y, x + 1, y", "\"section\": {\"color\": \"blue\"}, \"text\": {\"color\": \"black\"}, \"image\": {\"color\": \"red\"}, \"pdf\": {\"color\": \"black\"}, #", "self._canvases = [self.bg_layer, self.fixed_layer, self.animated_layer] def xywh(self, coords=None): \"\"\" ipycanvas requires xywh coords,", "xywh(self, coords=None): \"\"\" ipycanvas requires xywh coords, but ipyevents (and PIL) uses xyxy,", "{\"color\": \"green\"}, } class PdfCanvas(MultiCanvas): def __init__(self, **kwargs): super().__init__(3, **kwargs) self.add_class(\"ipypdf-pdf-canvas\") self.bboxes =", "correctly after making a change to any other layer \"\"\" self._canvases = [self.bg_layer,", "= CANVAS_TYPE_KWARGS[_type][\"color\"] self.fixed_layer.stroke_rect(*self.xywh(coords)) self.update() def set_type(self, _type: str): self._type = _type self.fixed_layer.stroke_style =", "self.animated_layer.clear_rect(0, 0, self.width, self.height) self.fixed_layer.stroke_rect(*self.xywh()) self.update() def add_image(self, img): \":param img: raw byte", "**kwargs) self.add_class(\"ipypdf-pdf-canvas\") self.bboxes = [] self.bg_layer = self[0] self.fixed_layer = self[1] self.animated_layer =", "of image\" self.bg_layer.draw_image(img) self.update() def draw_many(self, rects): self.clear() for coords, _type in rects:", "\"\"\" I don't know why, but this is needed in order to allow", "import MultiCanvas CANVAS_TYPE_KWARGS = { \"section\": {\"color\": \"blue\"}, \"text\": {\"color\": \"black\"}, \"image\": {\"color\":", "y): if self.mouse_is_down: self.rect[2] = x self.rect[3] = y self.draw_rect() def mouse_up(self, x,", "img: raw byte data of image\" self.bg_layer.draw_image(img) self.update() def draw_many(self, rects): self.clear() for", "self[1] self.animated_layer = self[2] self.animated_layer.on_mouse_down(self.mouse_down) self.animated_layer.on_mouse_move(self.mouse_move) self.animated_layer.on_mouse_up(self.mouse_up) self.rect = None self.mouse_is_down = False", "= [x, y, x + 1, y + 1] self.draw_rect() def mouse_move(self, x,", "rects: self.fixed_layer.stroke_style = CANVAS_TYPE_KWARGS[_type][\"color\"] self.fixed_layer.stroke_rect(*self.xywh(coords)) self.update() def set_type(self, _type: str): self._type = _type", "self.bboxes = [] self.bg_layer = self[0] self.fixed_layer = self[1] self.animated_layer = self[2] self.animated_layer.on_mouse_down(self.mouse_down)", "self.mouse_is_down = False def update(self): \"\"\" I don't know why, but this is", "coords, _type in rects: self.fixed_layer.stroke_style = CANVAS_TYPE_KWARGS[_type][\"color\"] 
self.fixed_layer.stroke_rect(*self.xywh(coords)) self.update() def set_type(self, _type: str):", "= [self.bg_layer, self.fixed_layer, self.animated_layer] def xywh(self, coords=None): \"\"\" ipycanvas requires xywh coords, but", "[] self.bg_layer = self[0] self.fixed_layer = self[1] self.animated_layer = self[2] self.animated_layer.on_mouse_down(self.mouse_down) self.animated_layer.on_mouse_move(self.mouse_move) self.animated_layer.on_mouse_up(self.mouse_up)", "PdfCanvas(MultiCanvas): def __init__(self, **kwargs): super().__init__(3, **kwargs) self.add_class(\"ipypdf-pdf-canvas\") self.bboxes = [] self.bg_layer = self[0]", "self.rect if not coords else coords x = min(x1, x2) y = min(y1,", "x, y, w, h def draw_rect(self): self.animated_layer.clear_rect(0, 0, self.width, self.height) self.animated_layer.stroke_rect(*self.xywh()) self.add_class(\"ipypdf-pdf-canvas\") def", "= None self.mouse_is_down = False def update(self): \"\"\" I don't know why, but", "{\"color\": \"red\"}, \"pdf\": {\"color\": \"black\"}, # Unused \"folder\": {\"color\": \"black\"}, # Unused \"table\":", "y, w, h def draw_rect(self): self.animated_layer.clear_rect(0, 0, self.width, self.height) self.animated_layer.stroke_rect(*self.xywh()) self.add_class(\"ipypdf-pdf-canvas\") def clear(self):", "(and PIL) uses xyxy, so conversion is needed to draw the box on", "\"pdf\": {\"color\": \"black\"}, # Unused \"folder\": {\"color\": \"black\"}, # Unused \"table\": {\"color\": \"green\"},", "Unused \"folder\": {\"color\": \"black\"}, # Unused \"table\": {\"color\": \"green\"}, } class PdfCanvas(MultiCanvas): def", "y2 = self.rect if not coords else coords x = min(x1, x2) y", "+ 1, y + 1] self.draw_rect() def mouse_move(self, x, y): if self.mouse_is_down: self.rect[2]", "I don't know why, but this is needed in order to allow animated_layer", "[self.bg_layer, self.fixed_layer, self.animated_layer] def xywh(self, coords=None): \"\"\" ipycanvas requires xywh coords, but ipyevents", "self.animated_layer.clear_rect(0, 0, self.width, self.height) self.animated_layer.stroke_rect(*self.xywh()) self.add_class(\"ipypdf-pdf-canvas\") def clear(self): self.fixed_layer.clear_rect(0, 0, self.width, self.height) def", "self.update() def set_type(self, _type: str): self._type = _type self.fixed_layer.stroke_style = CANVAS_TYPE_KWARGS[_type][\"color\"] self.animated_layer.stroke_style =", "x1, y1, x2, y2 = self.rect if not coords else coords x =", "box on the canvas. \"\"\" x1, y1, x2, y2 = self.rect if not", "draw the box on the canvas. 
\"\"\" x1, y1, x2, y2 = self.rect", "but ipyevents (and PIL) uses xyxy, so conversion is needed to draw the", "w = abs(x2 - x1) h = abs(y2 - y1) return x, y,", "= self[0] self.fixed_layer = self[1] self.animated_layer = self[2] self.animated_layer.on_mouse_down(self.mouse_down) self.animated_layer.on_mouse_move(self.mouse_move) self.animated_layer.on_mouse_up(self.mouse_up) self.rect =", "self.fixed_layer, self.animated_layer] def xywh(self, coords=None): \"\"\" ipycanvas requires xywh coords, but ipyevents (and", "self.animated_layer.on_mouse_down(self.mouse_down) self.animated_layer.on_mouse_move(self.mouse_move) self.animated_layer.on_mouse_up(self.mouse_up) self.rect = None self.mouse_is_down = False def update(self): \"\"\" I", "xywh coords, but ipyevents (and PIL) uses xyxy, so conversion is needed to", "else coords x = min(x1, x2) y = min(y1, y2) w = abs(x2", "x self.rect[3] = y self.draw_rect() def mouse_up(self, x, y): self.mouse_is_down = False self.animated_layer.clear_rect(0,", "\"folder\": {\"color\": \"black\"}, # Unused \"table\": {\"color\": \"green\"}, } class PdfCanvas(MultiCanvas): def __init__(self," ]
[ "lvf.src(ts_in) if ts_in.endswith('d2v'): src = core.vivtc.VDecimate(src) ac = audiocutter.AudioCutter() vid = ac.split(src, [(809,3542),(5223,18815),(20256,37971)])", "Babylonia/ac_Babylonia_03.py #!/usr/bin/env python3 import vapoursynth as vs import audiocutter import lvsfunc as lvf", "vapoursynth as vs import audiocutter import lvsfunc as lvf from subprocess import call", "audiocutter.AudioCutter() vid = ac.split(src, [(809,3542),(5223,18815),(20256,37971)]) ac.ready_qp_and_chapters(vid) vid.set_output(0) if __name__ == \"__main__\": ac.cut_audio(r'Babylonia_03_cut.m4a', audio_source=r'03/Fate", "#!/usr/bin/env python3 import vapoursynth as vs import audiocutter import lvsfunc as lvf from", "Demonic Front - Babylonia/ac_Babylonia_03.py #!/usr/bin/env python3 import vapoursynth as vs import audiocutter import", "vs import audiocutter import lvsfunc as lvf from subprocess import call core =", "ac.ready_qp_and_chapters(vid) vid.set_output(0) if __name__ == \"__main__\": ac.cut_audio(r'Babylonia_03_cut.m4a', audio_source=r'03/Fate Grand Order_ Zettai Majuu Sensen", "Grand Order_ Zettai Majuu Sensen Babylonia - 03 (MX) T112 stereo 244 kbps", "as vs import audiocutter import lvsfunc as lvf from subprocess import call core", "= vs.core ts_in = r'03/Fate Grand Order_ Zettai Majuu Sensen Babylonia - 03", "vid.set_output(0) if __name__ == \"__main__\": ac.cut_audio(r'Babylonia_03_cut.m4a', audio_source=r'03/Fate Grand Order_ Zettai Majuu Sensen Babylonia", "Order_ Zettai Majuu Sensen Babylonia - 03 (MX) T112 stereo 244 kbps DELAY", "python3 import vapoursynth as vs import audiocutter import lvsfunc as lvf from subprocess", "lvsfunc as lvf from subprocess import call core = vs.core ts_in = r'03/Fate", "Order_ Zettai Majuu Sensen Babylonia - 03 (MX).d2v' src = lvf.src(ts_in) if ts_in.endswith('d2v'):", "if __name__ == \"__main__\": ac.cut_audio(r'Babylonia_03_cut.m4a', audio_source=r'03/Fate Grand Order_ Zettai Majuu Sensen Babylonia -", "ts_in = r'03/Fate Grand Order_ Zettai Majuu Sensen Babylonia - 03 (MX).d2v' src", "ac = audiocutter.AudioCutter() vid = ac.split(src, [(809,3542),(5223,18815),(20256,37971)]) ac.ready_qp_and_chapters(vid) vid.set_output(0) if __name__ == \"__main__\":", "Absolute Demonic Front - Babylonia/ac_Babylonia_03.py #!/usr/bin/env python3 import vapoursynth as vs import audiocutter", "Babylonia - 03 (MX).d2v' src = lvf.src(ts_in) if ts_in.endswith('d2v'): src = core.vivtc.VDecimate(src) ac", "vid = ac.split(src, [(809,3542),(5223,18815),(20256,37971)]) ac.ready_qp_and_chapters(vid) vid.set_output(0) if __name__ == \"__main__\": ac.cut_audio(r'Babylonia_03_cut.m4a', audio_source=r'03/Fate Grand", "== \"__main__\": ac.cut_audio(r'Babylonia_03_cut.m4a', audio_source=r'03/Fate Grand Order_ Zettai Majuu Sensen Babylonia - 03 (MX)", "= audiocutter.AudioCutter() vid = ac.split(src, [(809,3542),(5223,18815),(20256,37971)]) ac.ready_qp_and_chapters(vid) vid.set_output(0) if __name__ == \"__main__\": ac.cut_audio(r'Babylonia_03_cut.m4a',", "from subprocess import call core = vs.core ts_in = r'03/Fate Grand Order_ Zettai", "core = vs.core ts_in = r'03/Fate Grand Order_ Zettai Majuu Sensen Babylonia -", "= lvf.src(ts_in) if ts_in.endswith('d2v'): src = core.vivtc.VDecimate(src) ac = audiocutter.AudioCutter() vid = ac.split(src,", "import audiocutter import lvsfunc as lvf from subprocess import call core = vs.core", "Zettai Majuu Sensen Babylonia - 03 (MX).d2v' src = lvf.src(ts_in) if ts_in.endswith('d2v'): src", "03 (MX).d2v' src = lvf.src(ts_in) if ts_in.endswith('d2v'): src = 
core.vivtc.VDecimate(src) ac = audiocutter.AudioCutter()", "call core = vs.core ts_in = r'03/Fate Grand Order_ Zettai Majuu Sensen Babylonia", "[(809,3542),(5223,18815),(20256,37971)]) ac.ready_qp_and_chapters(vid) vid.set_output(0) if __name__ == \"__main__\": ac.cut_audio(r'Babylonia_03_cut.m4a', audio_source=r'03/Fate Grand Order_ Zettai Majuu", "= ac.split(src, [(809,3542),(5223,18815),(20256,37971)]) ac.ready_qp_and_chapters(vid) vid.set_output(0) if __name__ == \"__main__\": ac.cut_audio(r'Babylonia_03_cut.m4a', audio_source=r'03/Fate Grand Order_", "audio_source=r'03/Fate Grand Order_ Zettai Majuu Sensen Babylonia - 03 (MX) T112 stereo 244", "__name__ == \"__main__\": ac.cut_audio(r'Babylonia_03_cut.m4a', audio_source=r'03/Fate Grand Order_ Zettai Majuu Sensen Babylonia - 03", "core.vivtc.VDecimate(src) ac = audiocutter.AudioCutter() vid = ac.split(src, [(809,3542),(5223,18815),(20256,37971)]) ac.ready_qp_and_chapters(vid) vid.set_output(0) if __name__ ==", "\"__main__\": ac.cut_audio(r'Babylonia_03_cut.m4a', audio_source=r'03/Fate Grand Order_ Zettai Majuu Sensen Babylonia - 03 (MX) T112", "- Absolute Demonic Front - Babylonia/ac_Babylonia_03.py #!/usr/bin/env python3 import vapoursynth as vs import", "import call core = vs.core ts_in = r'03/Fate Grand Order_ Zettai Majuu Sensen", "r'03/Fate Grand Order_ Zettai Majuu Sensen Babylonia - 03 (MX).d2v' src = lvf.src(ts_in)", "- 03 (MX).d2v' src = lvf.src(ts_in) if ts_in.endswith('d2v'): src = core.vivtc.VDecimate(src) ac =", "(MX).d2v' src = lvf.src(ts_in) if ts_in.endswith('d2v'): src = core.vivtc.VDecimate(src) ac = audiocutter.AudioCutter() vid", "Majuu Sensen Babylonia - 03 (MX) T112 stereo 244 kbps DELAY -356 ms.aac')", "src = core.vivtc.VDecimate(src) ac = audiocutter.AudioCutter() vid = ac.split(src, [(809,3542),(5223,18815),(20256,37971)]) ac.ready_qp_and_chapters(vid) vid.set_output(0) if", "as lvf from subprocess import call core = vs.core ts_in = r'03/Fate Grand", "Front - Babylonia/ac_Babylonia_03.py #!/usr/bin/env python3 import vapoursynth as vs import audiocutter import lvsfunc", "lvf from subprocess import call core = vs.core ts_in = r'03/Fate Grand Order_", "subprocess import call core = vs.core ts_in = r'03/Fate Grand Order_ Zettai Majuu", "Majuu Sensen Babylonia - 03 (MX).d2v' src = lvf.src(ts_in) if ts_in.endswith('d2v'): src =", "ac.cut_audio(r'Babylonia_03_cut.m4a', audio_source=r'03/Fate Grand Order_ Zettai Majuu Sensen Babylonia - 03 (MX) T112 stereo", "Sensen Babylonia - 03 (MX).d2v' src = lvf.src(ts_in) if ts_in.endswith('d2v'): src = core.vivtc.VDecimate(src)", "if ts_in.endswith('d2v'): src = core.vivtc.VDecimate(src) ac = audiocutter.AudioCutter() vid = ac.split(src, [(809,3542),(5223,18815),(20256,37971)]) ac.ready_qp_and_chapters(vid)", "ts_in.endswith('d2v'): src = core.vivtc.VDecimate(src) ac = audiocutter.AudioCutter() vid = ac.split(src, [(809,3542),(5223,18815),(20256,37971)]) ac.ready_qp_and_chapters(vid) vid.set_output(0)", "import vapoursynth as vs import audiocutter import lvsfunc as lvf from subprocess import", "<filename>[Kaleido-subs]/Dropped/FGO - Absolute Demonic Front - Babylonia/ac_Babylonia_03.py #!/usr/bin/env python3 import vapoursynth as vs", "import lvsfunc as lvf from subprocess import call core = vs.core ts_in =", "Zettai Majuu Sensen Babylonia - 03 (MX) T112 stereo 244 kbps DELAY -356", "= core.vivtc.VDecimate(src) ac = audiocutter.AudioCutter() vid = ac.split(src, [(809,3542),(5223,18815),(20256,37971)]) ac.ready_qp_and_chapters(vid) vid.set_output(0) if __name__", "Grand Order_ 
Zettai Majuu Sensen Babylonia - 03 (MX).d2v' src = lvf.src(ts_in) if", "= r'03/Fate Grand Order_ Zettai Majuu Sensen Babylonia - 03 (MX).d2v' src =", "vs.core ts_in = r'03/Fate Grand Order_ Zettai Majuu Sensen Babylonia - 03 (MX).d2v'", "audiocutter import lvsfunc as lvf from subprocess import call core = vs.core ts_in", "- Babylonia/ac_Babylonia_03.py #!/usr/bin/env python3 import vapoursynth as vs import audiocutter import lvsfunc as", "ac.split(src, [(809,3542),(5223,18815),(20256,37971)]) ac.ready_qp_and_chapters(vid) vid.set_output(0) if __name__ == \"__main__\": ac.cut_audio(r'Babylonia_03_cut.m4a', audio_source=r'03/Fate Grand Order_ Zettai", "src = lvf.src(ts_in) if ts_in.endswith('d2v'): src = core.vivtc.VDecimate(src) ac = audiocutter.AudioCutter() vid =" ]
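This row's fragments form a short VapourSynth/AudioCutter trimming script (ac_Babylonia_03.py). A reassembled sketch follows; it assumes the lvsfunc and audiocutter packages and the vivtc VapourSynth plugin named in the fragments are installed, and the input/audio paths are copied verbatim from the fragments.

#!/usr/bin/env python3
import vapoursynth as vs
import audiocutter
import lvsfunc as lvf

core = vs.core

ts_in = r'03/Fate Grand Order_ Zettai Majuu Sensen Babylonia - 03 (MX).d2v'

src = lvf.src(ts_in)
if ts_in.endswith('d2v'):
    # Drop duplicate frames from the telecined d2v transport stream source.
    src = core.vivtc.VDecimate(src)

# Keep only the three wanted frame ranges and prepare qpfile/chapter data.
ac = audiocutter.AudioCutter()
vid = ac.split(src, [(809, 3542), (5223, 18815), (20256, 37971)])
ac.ready_qp_and_chapters(vid)

vid.set_output(0)

if __name__ == "__main__":
    ac.cut_audio(r'Babylonia_03_cut.m4a',
                 audio_source=r'03/Fate Grand Order_ Zettai Majuu Sensen Babylonia'
                              r' - 03 (MX) T112 stereo 244 kbps DELAY -356 ms.aac')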
[ "sys from src.message import message class message_test(unittest.TestCase): def test_from_message_record(self): message_record = message( msg_id=185,", "= message( msg_id=185, channel_id=82, source_id=50, source_chat_id='111111', msg='Hello world') row = message_record.str().split(',') row =", "channel_id=82, source_id=50, source_chat_id='111111', msg='Hello world') row = message_record.str().split(',') row = [e.replace(\"'\", \"\") if", "test message_record_from_row = message.from_message_record(None) self.assertEqual(0, message_record_from_row.msg_id) self.assertEqual(0, message_record_from_row.channel_id) self.assertEqual(0, message_record_from_row.source_id) self.assertEqual('', message_record_from_row.source_chat_id) self.assertEqual('',", "self.assertEqual(message_record.channel_id, message_record_from_row.channel_id) self.assertEqual(message_record.source_id, message_record_from_row.source_id) self.assertEqual(message_record.source_chat_id, message_record_from_row.source_chat_id) self.assertEqual(message_record.msg, message_record_from_row.msg) ## Negative test message_record_from_row =", "message_record_from_row = message.from_message_record(None) self.assertEqual(0, message_record_from_row.msg_id) self.assertEqual(0, message_record_from_row.channel_id) self.assertEqual(0, message_record_from_row.source_id) self.assertEqual('', message_record_from_row.source_chat_id) self.assertEqual('', message_record_from_row.msg)", "message.from_message_record(None) self.assertEqual(0, message_record_from_row.msg_id) self.assertEqual(0, message_record_from_row.channel_id) self.assertEqual(0, message_record_from_row.source_id) self.assertEqual('', message_record_from_row.source_chat_id) self.assertEqual('', message_record_from_row.msg) if __name__", "if e.find(\"'\") > -1 else int(e) for e in row] message_record_from_row = message.from_message_record(row,", "> -1 else int(e) for e in row] message_record_from_row = message.from_message_record(row, False) ##", "= [e.replace(\"'\", \"\") if e.find(\"'\") > -1 else int(e) for e in row]", "row] message_record_from_row = message.from_message_record(row, False) ## Positive test self.assertEqual(message_record.date, message_record_from_row.date) self.assertEqual(message_record.time, message_record_from_row.time) self.assertEqual(message_record.msg_id,", "self.assertEqual(message_record.source_chat_id, message_record_from_row.source_chat_id) self.assertEqual(message_record.msg, message_record_from_row.msg) ## Negative test message_record_from_row = message.from_message_record(None) self.assertEqual(0, message_record_from_row.msg_id) self.assertEqual(0,", "## Negative test message_record_from_row = message.from_message_record(None) self.assertEqual(0, message_record_from_row.msg_id) self.assertEqual(0, message_record_from_row.channel_id) self.assertEqual(0, message_record_from_row.source_id) self.assertEqual('',", "unittest import logging import os import sys from src.message import message class message_test(unittest.TestCase):", "row = message_record.str().split(',') row = [e.replace(\"'\", \"\") if e.find(\"'\") > -1 else int(e)", "message_record_from_row.date) self.assertEqual(message_record.time, message_record_from_row.time) self.assertEqual(message_record.msg_id, message_record_from_row.msg_id) self.assertEqual(message_record.channel_id, message_record_from_row.channel_id) self.assertEqual(message_record.source_id, message_record_from_row.source_id) self.assertEqual(message_record.source_chat_id, message_record_from_row.source_chat_id) 
self.assertEqual(message_record.msg, message_record_from_row.msg)", "in row] message_record_from_row = message.from_message_record(row, False) ## Positive test self.assertEqual(message_record.date, message_record_from_row.date) self.assertEqual(message_record.time, message_record_from_row.time)", "## Positive test self.assertEqual(message_record.date, message_record_from_row.date) self.assertEqual(message_record.time, message_record_from_row.time) self.assertEqual(message_record.msg_id, message_record_from_row.msg_id) self.assertEqual(message_record.channel_id, message_record_from_row.channel_id) self.assertEqual(message_record.source_id, message_record_from_row.source_id)", "self.assertEqual(message_record.time, message_record_from_row.time) self.assertEqual(message_record.msg_id, message_record_from_row.msg_id) self.assertEqual(message_record.channel_id, message_record_from_row.channel_id) self.assertEqual(message_record.source_id, message_record_from_row.source_id) self.assertEqual(message_record.source_chat_id, message_record_from_row.source_chat_id) self.assertEqual(message_record.msg, message_record_from_row.msg) ##", "message_record_from_row.msg_id) self.assertEqual(0, message_record_from_row.channel_id) self.assertEqual(0, message_record_from_row.source_id) self.assertEqual('', message_record_from_row.source_chat_id) self.assertEqual('', message_record_from_row.msg) if __name__ == '__main__':", "def test_from_message_record(self): message_record = message( msg_id=185, channel_id=82, source_id=50, source_chat_id='111111', msg='Hello world') row =", "msg_id=185, channel_id=82, source_id=50, source_chat_id='111111', msg='Hello world') row = message_record.str().split(',') row = [e.replace(\"'\", \"\")", "message class message_test(unittest.TestCase): def test_from_message_record(self): message_record = message( msg_id=185, channel_id=82, source_id=50, source_chat_id='111111', msg='Hello", "self.assertEqual(message_record.msg_id, message_record_from_row.msg_id) self.assertEqual(message_record.channel_id, message_record_from_row.channel_id) self.assertEqual(message_record.source_id, message_record_from_row.source_id) self.assertEqual(message_record.source_chat_id, message_record_from_row.source_chat_id) self.assertEqual(message_record.msg, message_record_from_row.msg) ## Negative test", "message_record = message( msg_id=185, channel_id=82, source_id=50, source_chat_id='111111', msg='Hello world') row = message_record.str().split(',') row", "class message_test(unittest.TestCase): def test_from_message_record(self): message_record = message( msg_id=185, channel_id=82, source_id=50, source_chat_id='111111', msg='Hello world')", "self.assertEqual(0, message_record_from_row.channel_id) self.assertEqual(0, message_record_from_row.source_id) self.assertEqual('', message_record_from_row.source_chat_id) self.assertEqual('', message_record_from_row.msg) if __name__ == '__main__': unittest.main()", "message_record_from_row = message.from_message_record(row, False) ## Positive test self.assertEqual(message_record.date, message_record_from_row.date) self.assertEqual(message_record.time, message_record_from_row.time) self.assertEqual(message_record.msg_id, message_record_from_row.msg_id)", "msg='Hello world') row = message_record.str().split(',') row = [e.replace(\"'\", \"\") if e.find(\"'\") > -1", "test self.assertEqual(message_record.date, message_record_from_row.date) self.assertEqual(message_record.time, message_record_from_row.time) self.assertEqual(message_record.msg_id, 
message_record_from_row.msg_id) self.assertEqual(message_record.channel_id, message_record_from_row.channel_id) self.assertEqual(message_record.source_id, message_record_from_row.source_id) self.assertEqual(message_record.source_chat_id, message_record_from_row.source_chat_id)", "import os import sys from src.message import message class message_test(unittest.TestCase): def test_from_message_record(self): message_record", "source_id=50, source_chat_id='111111', msg='Hello world') row = message_record.str().split(',') row = [e.replace(\"'\", \"\") if e.find(\"'\")", "self.assertEqual(message_record.msg, message_record_from_row.msg) ## Negative test message_record_from_row = message.from_message_record(None) self.assertEqual(0, message_record_from_row.msg_id) self.assertEqual(0, message_record_from_row.channel_id) self.assertEqual(0,", "message_record_from_row.msg) ## Negative test message_record_from_row = message.from_message_record(None) self.assertEqual(0, message_record_from_row.msg_id) self.assertEqual(0, message_record_from_row.channel_id) self.assertEqual(0, message_record_from_row.source_id)", "self.assertEqual(0, message_record_from_row.msg_id) self.assertEqual(0, message_record_from_row.channel_id) self.assertEqual(0, message_record_from_row.source_id) self.assertEqual('', message_record_from_row.source_chat_id) self.assertEqual('', message_record_from_row.msg) if __name__ ==", "e in row] message_record_from_row = message.from_message_record(row, False) ## Positive test self.assertEqual(message_record.date, message_record_from_row.date) self.assertEqual(message_record.time,", "e.find(\"'\") > -1 else int(e) for e in row] message_record_from_row = message.from_message_record(row, False)", "message_record_from_row.channel_id) self.assertEqual(message_record.source_id, message_record_from_row.source_id) self.assertEqual(message_record.source_chat_id, message_record_from_row.source_chat_id) self.assertEqual(message_record.msg, message_record_from_row.msg) ## Negative test message_record_from_row = message.from_message_record(None)", "message.from_message_record(row, False) ## Positive test self.assertEqual(message_record.date, message_record_from_row.date) self.assertEqual(message_record.time, message_record_from_row.time) self.assertEqual(message_record.msg_id, message_record_from_row.msg_id) self.assertEqual(message_record.channel_id, message_record_from_row.channel_id)", "message_record_from_row.time) self.assertEqual(message_record.msg_id, message_record_from_row.msg_id) self.assertEqual(message_record.channel_id, message_record_from_row.channel_id) self.assertEqual(message_record.source_id, message_record_from_row.source_id) self.assertEqual(message_record.source_chat_id, message_record_from_row.source_chat_id) self.assertEqual(message_record.msg, message_record_from_row.msg) ## Negative", "import unittest import logging import os import sys from src.message import message class", "src.message import message class message_test(unittest.TestCase): def test_from_message_record(self): message_record = message( msg_id=185, channel_id=82, source_id=50,", "message_record_from_row.source_id) self.assertEqual(message_record.source_chat_id, message_record_from_row.source_chat_id) self.assertEqual(message_record.msg, message_record_from_row.msg) ## Negative test message_record_from_row = message.from_message_record(None) self.assertEqual(0, message_record_from_row.msg_id)", "= message_record.str().split(',') row = [e.replace(\"'\", \"\") if e.find(\"'\") > -1 else int(e) for", "logging 
import os import sys from src.message import message class message_test(unittest.TestCase): def test_from_message_record(self):", "for e in row] message_record_from_row = message.from_message_record(row, False) ## Positive test self.assertEqual(message_record.date, message_record_from_row.date)", "message( msg_id=185, channel_id=82, source_id=50, source_chat_id='111111', msg='Hello world') row = message_record.str().split(',') row = [e.replace(\"'\",", "int(e) for e in row] message_record_from_row = message.from_message_record(row, False) ## Positive test self.assertEqual(message_record.date,", "[e.replace(\"'\", \"\") if e.find(\"'\") > -1 else int(e) for e in row] message_record_from_row", "from src.message import message class message_test(unittest.TestCase): def test_from_message_record(self): message_record = message( msg_id=185, channel_id=82,", "message_record.str().split(',') row = [e.replace(\"'\", \"\") if e.find(\"'\") > -1 else int(e) for e", "= message.from_message_record(row, False) ## Positive test self.assertEqual(message_record.date, message_record_from_row.date) self.assertEqual(message_record.time, message_record_from_row.time) self.assertEqual(message_record.msg_id, message_record_from_row.msg_id) self.assertEqual(message_record.channel_id,", "message_record_from_row.msg_id) self.assertEqual(message_record.channel_id, message_record_from_row.channel_id) self.assertEqual(message_record.source_id, message_record_from_row.source_id) self.assertEqual(message_record.source_chat_id, message_record_from_row.source_chat_id) self.assertEqual(message_record.msg, message_record_from_row.msg) ## Negative test message_record_from_row", "message_record_from_row.source_chat_id) self.assertEqual(message_record.msg, message_record_from_row.msg) ## Negative test message_record_from_row = message.from_message_record(None) self.assertEqual(0, message_record_from_row.msg_id) self.assertEqual(0, message_record_from_row.channel_id)", "row = [e.replace(\"'\", \"\") if e.find(\"'\") > -1 else int(e) for e in", "#!/bin/python import unittest import logging import os import sys from src.message import message", "message_test(unittest.TestCase): def test_from_message_record(self): message_record = message( msg_id=185, channel_id=82, source_id=50, source_chat_id='111111', msg='Hello world') row", "Positive test self.assertEqual(message_record.date, message_record_from_row.date) self.assertEqual(message_record.time, message_record_from_row.time) self.assertEqual(message_record.msg_id, message_record_from_row.msg_id) self.assertEqual(message_record.channel_id, message_record_from_row.channel_id) self.assertEqual(message_record.source_id, message_record_from_row.source_id) self.assertEqual(message_record.source_chat_id,", "\"\") if e.find(\"'\") > -1 else int(e) for e in row] message_record_from_row =", "source_chat_id='111111', msg='Hello world') row = message_record.str().split(',') row = [e.replace(\"'\", \"\") if e.find(\"'\") >", "import logging import os import sys from src.message import message class message_test(unittest.TestCase): def", "test_from_message_record(self): message_record = message( msg_id=185, channel_id=82, source_id=50, source_chat_id='111111', msg='Hello world') row = message_record.str().split(',')", "world') row = message_record.str().split(',') row = [e.replace(\"'\", \"\") if e.find(\"'\") > -1 else", "self.assertEqual(message_record.source_id, message_record_from_row.source_id) self.assertEqual(message_record.source_chat_id, message_record_from_row.source_chat_id) 
self.assertEqual(message_record.msg, message_record_from_row.msg) ## Negative test message_record_from_row = message.from_message_record(None) self.assertEqual(0,", "import message class message_test(unittest.TestCase): def test_from_message_record(self): message_record = message( msg_id=185, channel_id=82, source_id=50, source_chat_id='111111',", "-1 else int(e) for e in row] message_record_from_row = message.from_message_record(row, False) ## Positive", "os import sys from src.message import message class message_test(unittest.TestCase): def test_from_message_record(self): message_record =", "self.assertEqual(message_record.date, message_record_from_row.date) self.assertEqual(message_record.time, message_record_from_row.time) self.assertEqual(message_record.msg_id, message_record_from_row.msg_id) self.assertEqual(message_record.channel_id, message_record_from_row.channel_id) self.assertEqual(message_record.source_id, message_record_from_row.source_id) self.assertEqual(message_record.source_chat_id, message_record_from_row.source_chat_id) self.assertEqual(message_record.msg,", "import sys from src.message import message class message_test(unittest.TestCase): def test_from_message_record(self): message_record = message(", "Negative test message_record_from_row = message.from_message_record(None) self.assertEqual(0, message_record_from_row.msg_id) self.assertEqual(0, message_record_from_row.channel_id) self.assertEqual(0, message_record_from_row.source_id) self.assertEqual('', message_record_from_row.source_chat_id)", "False) ## Positive test self.assertEqual(message_record.date, message_record_from_row.date) self.assertEqual(message_record.time, message_record_from_row.time) self.assertEqual(message_record.msg_id, message_record_from_row.msg_id) self.assertEqual(message_record.channel_id, message_record_from_row.channel_id) self.assertEqual(message_record.source_id,", "else int(e) for e in row] message_record_from_row = message.from_message_record(row, False) ## Positive test", "= message.from_message_record(None) self.assertEqual(0, message_record_from_row.msg_id) self.assertEqual(0, message_record_from_row.channel_id) self.assertEqual(0, message_record_from_row.source_id) self.assertEqual('', message_record_from_row.source_chat_id) self.assertEqual('', message_record_from_row.msg) if" ]
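The fragments in this row cover a complete unittest for a message record round trip: serialise with str(), parse back with from_message_record, and check the fallback behaviour on None input. A reassembled sketch follows; it assumes the src.message module referenced in the fragments is importable and exposes the constructor and from_message_record signatures seen there.

#!/bin/python
import unittest

from src.message import message


class message_test(unittest.TestCase):
    def test_from_message_record(self):
        message_record = message(
            msg_id=185,
            channel_id=82,
            source_id=50,
            source_chat_id='111111',
            msg='Hello world')

        # Serialise to a comma-separated record, then coerce each field back
        # to int or bare string before re-parsing it.
        row = message_record.str().split(',')
        row = [e.replace("'", "") if e.find("'") > -1 else int(e) for e in row]
        message_record_from_row = message.from_message_record(row, False)

        ## Positive test: every field survives the round trip
        self.assertEqual(message_record.date, message_record_from_row.date)
        self.assertEqual(message_record.time, message_record_from_row.time)
        self.assertEqual(message_record.msg_id, message_record_from_row.msg_id)
        self.assertEqual(message_record.channel_id, message_record_from_row.channel_id)
        self.assertEqual(message_record.source_id, message_record_from_row.source_id)
        self.assertEqual(message_record.source_chat_id, message_record_from_row.source_chat_id)
        self.assertEqual(message_record.msg, message_record_from_row.msg)

        ## Negative test: None input falls back to zero/empty defaults
        message_record_from_row = message.from_message_record(None)
        self.assertEqual(0, message_record_from_row.msg_id)
        self.assertEqual(0, message_record_from_row.channel_id)
        self.assertEqual(0, message_record_from_row.source_id)
        self.assertEqual('', message_record_from_row.source_chat_id)
        self.assertEqual('', message_record_from_row.msg)


if __name__ == '__main__':
    unittest.main()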
[]
[ "class Episode(Base): __tablename__ = \"episode\" id = Column(Integer, primary_key=True, index=True) name = Column(String,", "\"episode\" id = Column(Integer, primary_key=True, index=True) name = Column(String, unique=True) air_date = Column(Date)", "import Base from app.models import CharacterEpisode class Episode(Base): __tablename__ = \"episode\" id =", "Integer, String, Date from sqlalchemy.ext.associationproxy import association_proxy from sqlalchemy.orm import relationship from app.db.session", "from sqlalchemy.orm import relationship from app.db.session import Base from app.models import CharacterEpisode class", "import Column, Integer, String, Date from sqlalchemy.ext.associationproxy import association_proxy from sqlalchemy.orm import relationship", "= relationship(\"CharacterEpisode\", back_populates=\"episode\") comments = relationship(\"Comment\", back_populates=\"episode\") association_ids = association_proxy( \"characters\", \"character_id\", creator=lambda", "= \"episode\" id = Column(Integer, primary_key=True, index=True) name = Column(String, unique=True) air_date =", "Base from app.models import CharacterEpisode class Episode(Base): __tablename__ = \"episode\" id = Column(Integer,", "association_proxy from sqlalchemy.orm import relationship from app.db.session import Base from app.models import CharacterEpisode", "characters = relationship(\"CharacterEpisode\", back_populates=\"episode\") comments = relationship(\"Comment\", back_populates=\"episode\") association_ids = association_proxy( \"characters\", \"character_id\",", "Column(String, unique=True) air_date = Column(Date) segment = Column(String, unique=True) characters = relationship(\"CharacterEpisode\", back_populates=\"episode\")", "air_date = Column(Date) segment = Column(String, unique=True) characters = relationship(\"CharacterEpisode\", back_populates=\"episode\") comments =", "CharacterEpisode class Episode(Base): __tablename__ = \"episode\" id = Column(Integer, primary_key=True, index=True) name =", "import relationship from app.db.session import Base from app.models import CharacterEpisode class Episode(Base): __tablename__", "from app.db.session import Base from app.models import CharacterEpisode class Episode(Base): __tablename__ = \"episode\"", "= Column(String, unique=True) characters = relationship(\"CharacterEpisode\", back_populates=\"episode\") comments = relationship(\"Comment\", back_populates=\"episode\") association_ids =", "Episode(Base): __tablename__ = \"episode\" id = Column(Integer, primary_key=True, index=True) name = Column(String, unique=True)", "name = Column(String, unique=True) air_date = Column(Date) segment = Column(String, unique=True) characters =", "comments = relationship(\"Comment\", back_populates=\"episode\") association_ids = association_proxy( \"characters\", \"character_id\", creator=lambda cid: CharacterEpisode(character_id=cid), )", "sqlalchemy import Column, Integer, String, Date from sqlalchemy.ext.associationproxy import association_proxy from sqlalchemy.orm import", "Date from sqlalchemy.ext.associationproxy import association_proxy from sqlalchemy.orm import relationship from app.db.session import Base", "index=True) name = Column(String, unique=True) air_date = Column(Date) segment = Column(String, unique=True) characters", "id = Column(Integer, primary_key=True, index=True) name = Column(String, unique=True) air_date = Column(Date) segment", "__tablename__ = \"episode\" id = Column(Integer, primary_key=True, index=True) name = Column(String, unique=True) 
air_date", "= Column(Integer, primary_key=True, index=True) name = Column(String, unique=True) air_date = Column(Date) segment =", "from sqlalchemy.ext.associationproxy import association_proxy from sqlalchemy.orm import relationship from app.db.session import Base from", "Column, Integer, String, Date from sqlalchemy.ext.associationproxy import association_proxy from sqlalchemy.orm import relationship from", "from sqlalchemy import Column, Integer, String, Date from sqlalchemy.ext.associationproxy import association_proxy from sqlalchemy.orm", "= Column(String, unique=True) air_date = Column(Date) segment = Column(String, unique=True) characters = relationship(\"CharacterEpisode\",", "sqlalchemy.ext.associationproxy import association_proxy from sqlalchemy.orm import relationship from app.db.session import Base from app.models", "app.models import CharacterEpisode class Episode(Base): __tablename__ = \"episode\" id = Column(Integer, primary_key=True, index=True)", "from app.models import CharacterEpisode class Episode(Base): __tablename__ = \"episode\" id = Column(Integer, primary_key=True,", "unique=True) air_date = Column(Date) segment = Column(String, unique=True) characters = relationship(\"CharacterEpisode\", back_populates=\"episode\") comments", "Column(Date) segment = Column(String, unique=True) characters = relationship(\"CharacterEpisode\", back_populates=\"episode\") comments = relationship(\"Comment\", back_populates=\"episode\")", "primary_key=True, index=True) name = Column(String, unique=True) air_date = Column(Date) segment = Column(String, unique=True)", "= Column(Date) segment = Column(String, unique=True) characters = relationship(\"CharacterEpisode\", back_populates=\"episode\") comments = relationship(\"Comment\",", "sqlalchemy.orm import relationship from app.db.session import Base from app.models import CharacterEpisode class Episode(Base):", "segment = Column(String, unique=True) characters = relationship(\"CharacterEpisode\", back_populates=\"episode\") comments = relationship(\"Comment\", back_populates=\"episode\") association_ids", "Column(Integer, primary_key=True, index=True) name = Column(String, unique=True) air_date = Column(Date) segment = Column(String,", "import CharacterEpisode class Episode(Base): __tablename__ = \"episode\" id = Column(Integer, primary_key=True, index=True) name", "relationship(\"CharacterEpisode\", back_populates=\"episode\") comments = relationship(\"Comment\", back_populates=\"episode\") association_ids = association_proxy( \"characters\", \"character_id\", creator=lambda cid:", "app.db.session import Base from app.models import CharacterEpisode class Episode(Base): __tablename__ = \"episode\" id", "import association_proxy from sqlalchemy.orm import relationship from app.db.session import Base from app.models import", "String, Date from sqlalchemy.ext.associationproxy import association_proxy from sqlalchemy.orm import relationship from app.db.session import", "unique=True) characters = relationship(\"CharacterEpisode\", back_populates=\"episode\") comments = relationship(\"Comment\", back_populates=\"episode\") association_ids = association_proxy( \"characters\",", "back_populates=\"episode\") comments = relationship(\"Comment\", back_populates=\"episode\") association_ids = association_proxy( \"characters\", \"character_id\", creator=lambda cid: CharacterEpisode(character_id=cid),", "Column(String, unique=True) characters = relationship(\"CharacterEpisode\", back_populates=\"episode\") comments = relationship(\"Comment\", 
back_populates=\"episode\") association_ids = association_proxy(", "relationship from app.db.session import Base from app.models import CharacterEpisode class Episode(Base): __tablename__ =" ]
[ "n_transactions_pred # Calculate the probability of alive customers alive_prob = bgf.conditional_probability_alive(frequency = sample_customer['frequency_cal'],", "1 # Get the predicted number of transactions in the next 26 weeks", "this customer's frequency, recency and T both for the calibration and observation periods", "Calculate the conditional expected number of transactions in the given period n_transactions_pred =", "from lifetimes import BetaGeoFitter # instantiation of BG-NBD model bgf = BetaGeoFitter(penalizer_coef =", "fitting of BG-NBD model bgf.fit(frequency = rfm_cal_holdout['frequency_cal'], recency = rfm_cal_holdout['recency_cal'], T = rfm_cal_holdout['T_cal'])", "number of transactions # Get the real number of transactions in the observation", "number of transactions in the observation period, which equals frequency_holdout + 1 rfm_cal_holdout['n_transactions_holdout_real']", "bgf = BetaGeoFitter(penalizer_coef = 0.0) # fitting of BG-NBD model bgf.fit(frequency = rfm_cal_holdout['frequency_cal'],", "frequency = sample_customer['frequency_cal'], recency = sample_customer['recency_cal'], T = sample_customer['T_cal']) n_transactions_pred # Calculate the", "T = rfm_cal_holdout['T_cal']) bgf.summary # Assessment of model fit # First is to", "sample_customer['T_cal']) alive_prob # Compare the real and predicted number of transactions # Get", "import mean_squared_error RMSE = mean_squared_error(y_true = rfm_cal_holdout['n_transactions_holdout_real'], y_pred = rfm_cal_holdout['n_transactions_holdout_pred'], squared = False)", "rfm_cal_holdout['n_transactions_holdout_real'] = rfm_cal_holdout['frequency_holdout'] + 1 # Get the predicted number of transactions in", "recency = rfm_cal_holdout['recency_cal'], T = rfm_cal_holdout['T_cal']) # Compare the real and predicted transactions", "by the BG-NBD model from lifetimes.plotting import plot_period_transactions _ = plot_period_transactions(bgf) # Prediction", "= sample_customer['T_cal']) alive_prob # Compare the real and predicted number of transactions #", "the given period n_transactions_pred = bgf.predict(t = 26, # set it to 26", "rfm_cal_holdout['recency_cal'], T = rfm_cal_holdout['T_cal']) bgf.summary # Assessment of model fit # First is", "BG-NBD model bgf = BetaGeoFitter(penalizer_coef = 0.0) # fitting of BG-NBD model bgf.fit(frequency", "the probability of alive customers alive_prob = bgf.conditional_probability_alive(frequency = sample_customer['frequency_cal'], recency = sample_customer['recency_cal'],", "= sample_customer['frequency_cal'], recency = sample_customer['recency_cal'], T = sample_customer['T_cal']) alive_prob # Compare the real", "predicted number of transactions in the next 26 weeks (lenght of the observation", "fit # First is to compare the frequencies between our real calibration data", "<filename>BG-NBD_model/3_training_prediction_evaluation.py # Fitting from lifetimes import BetaGeoFitter # instantiation of BG-NBD model bgf", "the observation period) frequency = sample_customer['frequency_cal'], recency = sample_customer['recency_cal'], T = sample_customer['T_cal']) n_transactions_pred", "import BetaGeoFitter # instantiation of BG-NBD model bgf = BetaGeoFitter(penalizer_coef = 0.0) #", "the real number of transactions in the observation period, which equals frequency_holdout +", "+ 1 # Get the predicted number of transactions in the next 26", "= rfm_cal_holdout['T_cal']) bgf.summary # Assessment of model fit # First is to compare", "model bgf.fit(frequency = rfm_cal_holdout['frequency_cal'], recency = 
rfm_cal_holdout['recency_cal'], T = rfm_cal_holdout['T_cal']) bgf.summary # Assessment", "generated by the BG-NBD model from lifetimes.plotting import plot_period_transactions _ = plot_period_transactions(bgf) #", "plot_period_transactions(bgf) # Prediction # First we choose a sample customer sample_customer = rfm_cal_holdout.iloc[20]", "predicted number of transactions # Get the real number of transactions in the", "# First we choose a sample customer sample_customer = rfm_cal_holdout.iloc[20] # Inspect this", "26, # set it to 26 weeks (the length of the observation period)", "mean_squared_error RMSE = mean_squared_error(y_true = rfm_cal_holdout['n_transactions_holdout_real'], y_pred = rfm_cal_holdout['n_transactions_holdout_pred'], squared = False) RMSE", "# Get the real number of transactions in the observation period, which equals", "# fitting of BG-NBD model bgf.fit(frequency = rfm_cal_holdout['frequency_cal'], recency = rfm_cal_holdout['recency_cal'], T =", "1 rfm_cal_holdout['n_transactions_holdout_real'] = rfm_cal_holdout['frequency_holdout'] + 1 # Get the predicted number of transactions", "bgf.fit(frequency = rfm_cal_holdout['frequency_cal'], recency = rfm_cal_holdout['recency_cal'], T = rfm_cal_holdout['T_cal']) bgf.summary # Assessment of", "rfm_cal_holdout['T_cal']) bgf.summary # Assessment of model fit # First is to compare the", "Prediction # First we choose a sample customer sample_customer = rfm_cal_holdout.iloc[20] # Inspect", "# Calculate the probability of alive customers alive_prob = bgf.conditional_probability_alive(frequency = sample_customer['frequency_cal'], recency", "sample_customer['recency_cal'], T = sample_customer['T_cal']) alive_prob # Compare the real and predicted number of", "transactions # Get the real number of transactions in the observation period, which", "in the observation period, which equals frequency_holdout + 1 rfm_cal_holdout['n_transactions_holdout_real'] = rfm_cal_holdout['frequency_holdout'] +", "of transactions in the observation period, which equals frequency_holdout + 1 rfm_cal_holdout['n_transactions_holdout_real'] =", "Compare the real and predicted transactions rfm_cal_holdout[['n_transactions_holdout_real', 'n_transactions_holdout_pred']].head() #RMSE from sklearn.metrics import mean_squared_error", "observation periods sample_customer # Calculate the conditional expected number of transactions in the", "of BG-NBD model bgf = BetaGeoFitter(penalizer_coef = 0.0) # fitting of BG-NBD model", "model from lifetimes.plotting import plot_period_transactions _ = plot_period_transactions(bgf) # Prediction # First we", "# Calculate the conditional expected number of transactions in the given period n_transactions_pred", "= rfm_cal_holdout['frequency_cal'], recency = rfm_cal_holdout['recency_cal'], T = rfm_cal_holdout['T_cal']) bgf.summary # Assessment of model", "to compare the frequencies between our real calibration data and artificial data sampled", "Calculate the probability of alive customers alive_prob = bgf.conditional_probability_alive(frequency = sample_customer['frequency_cal'], recency =", "of transactions in the next 26 weeks (lenght of the observation period) rfm_cal_holdout['n_transactions_holdout_pred']", "# Assessment of model fit # First is to compare the frequencies between", "real calibration data and artificial data sampled from the distributions generated by the", "#RMSE from sklearn.metrics import mean_squared_error RMSE = mean_squared_error(y_true = rfm_cal_holdout['n_transactions_holdout_real'], y_pred = 
rfm_cal_holdout['n_transactions_holdout_pred'],", "sample_customer['T_cal']) n_transactions_pred # Calculate the probability of alive customers alive_prob = bgf.conditional_probability_alive(frequency =", "recency = rfm_cal_holdout['recency_cal'], T = rfm_cal_holdout['T_cal']) bgf.summary # Assessment of model fit #", "to 26 weeks (the length of the observation period) frequency = sample_customer['frequency_cal'], recency", "First is to compare the frequencies between our real calibration data and artificial", "data sampled from the distributions generated by the BG-NBD model from lifetimes.plotting import", "n_transactions_pred = bgf.predict(t = 26, # set it to 26 weeks (the length", "length of the observation period) frequency = sample_customer['frequency_cal'], recency = sample_customer['recency_cal'], T =", "+ 1 rfm_cal_holdout['n_transactions_holdout_real'] = rfm_cal_holdout['frequency_holdout'] + 1 # Get the predicted number of", "and observation periods sample_customer # Calculate the conditional expected number of transactions in", "= rfm_cal_holdout['T_cal']) # Compare the real and predicted transactions rfm_cal_holdout[['n_transactions_holdout_real', 'n_transactions_holdout_pred']].head() #RMSE from", "and artificial data sampled from the distributions generated by the BG-NBD model from", "BG-NBD model from lifetimes.plotting import plot_period_transactions _ = plot_period_transactions(bgf) # Prediction # First", "rfm_cal_holdout['frequency_holdout'] + 1 # Get the predicted number of transactions in the next", "periods sample_customer # Calculate the conditional expected number of transactions in the given", "observation period) rfm_cal_holdout['n_transactions_holdout_pred'] = bgf.predict(t=26, frequency=rfm_cal_holdout['frequency_cal'], recency = rfm_cal_holdout['recency_cal'], T = rfm_cal_holdout['T_cal']) #", "import plot_period_transactions _ = plot_period_transactions(bgf) # Prediction # First we choose a sample", "= 0.0) # fitting of BG-NBD model bgf.fit(frequency = rfm_cal_holdout['frequency_cal'], recency = rfm_cal_holdout['recency_cal'],", "between our real calibration data and artificial data sampled from the distributions generated", "# set it to 26 weeks (the length of the observation period) frequency", "expected number of transactions in the given period n_transactions_pred = bgf.predict(t = 26,", "from the distributions generated by the BG-NBD model from lifetimes.plotting import plot_period_transactions _", "period) frequency = sample_customer['frequency_cal'], recency = sample_customer['recency_cal'], T = sample_customer['T_cal']) n_transactions_pred # Calculate", "lifetimes.plotting import plot_period_transactions _ = plot_period_transactions(bgf) # Prediction # First we choose a", "# instantiation of BG-NBD model bgf = BetaGeoFitter(penalizer_coef = 0.0) # fitting of", "T = sample_customer['T_cal']) alive_prob # Compare the real and predicted number of transactions", "of the observation period) rfm_cal_holdout['n_transactions_holdout_pred'] = bgf.predict(t=26, frequency=rfm_cal_holdout['frequency_cal'], recency = rfm_cal_holdout['recency_cal'], T =", "the real and predicted transactions rfm_cal_holdout[['n_transactions_holdout_real', 'n_transactions_holdout_pred']].head() #RMSE from sklearn.metrics import mean_squared_error RMSE", "the frequencies between our real calibration data and artificial data sampled from the", "data and artificial data sampled from the distributions generated by the BG-NBD model", "sample_customer = rfm_cal_holdout.iloc[20] # Inspect 
this customer's frequency, recency and T both for", "'n_transactions_holdout_pred']].head() #RMSE from sklearn.metrics import mean_squared_error RMSE = mean_squared_error(y_true = rfm_cal_holdout['n_transactions_holdout_real'], y_pred =", "of transactions # Get the real number of transactions in the observation period,", "= sample_customer['recency_cal'], T = sample_customer['T_cal']) alive_prob # Compare the real and predicted number", "26 weeks (lenght of the observation period) rfm_cal_holdout['n_transactions_holdout_pred'] = bgf.predict(t=26, frequency=rfm_cal_holdout['frequency_cal'], recency =", "and T both for the calibration and observation periods sample_customer # Calculate the", "of the observation period) frequency = sample_customer['frequency_cal'], recency = sample_customer['recency_cal'], T = sample_customer['T_cal'])", "instantiation of BG-NBD model bgf = BetaGeoFitter(penalizer_coef = 0.0) # fitting of BG-NBD", "= bgf.conditional_probability_alive(frequency = sample_customer['frequency_cal'], recency = sample_customer['recency_cal'], T = sample_customer['T_cal']) alive_prob # Compare", "frequency=rfm_cal_holdout['frequency_cal'], recency = rfm_cal_holdout['recency_cal'], T = rfm_cal_holdout['T_cal']) # Compare the real and predicted", "BG-NBD model bgf.fit(frequency = rfm_cal_holdout['frequency_cal'], recency = rfm_cal_holdout['recency_cal'], T = rfm_cal_holdout['T_cal']) bgf.summary #", "T = sample_customer['T_cal']) n_transactions_pred # Calculate the probability of alive customers alive_prob =", "alive customers alive_prob = bgf.conditional_probability_alive(frequency = sample_customer['frequency_cal'], recency = sample_customer['recency_cal'], T = sample_customer['T_cal'])", "customer's frequency, recency and T both for the calibration and observation periods sample_customer", "customer sample_customer = rfm_cal_holdout.iloc[20] # Inspect this customer's frequency, recency and T both", "calibration and observation periods sample_customer # Calculate the conditional expected number of transactions", "calibration data and artificial data sampled from the distributions generated by the BG-NBD", "= bgf.predict(t=26, frequency=rfm_cal_holdout['frequency_cal'], recency = rfm_cal_holdout['recency_cal'], T = rfm_cal_holdout['T_cal']) # Compare the real", "sampled from the distributions generated by the BG-NBD model from lifetimes.plotting import plot_period_transactions", "and predicted number of transactions # Get the real number of transactions in", "the conditional expected number of transactions in the given period n_transactions_pred = bgf.predict(t", "transactions in the next 26 weeks (lenght of the observation period) rfm_cal_holdout['n_transactions_holdout_pred'] =", "sample customer sample_customer = rfm_cal_holdout.iloc[20] # Inspect this customer's frequency, recency and T", "26 weeks (the length of the observation period) frequency = sample_customer['frequency_cal'], recency =", "a sample customer sample_customer = rfm_cal_holdout.iloc[20] # Inspect this customer's frequency, recency and", "_ = plot_period_transactions(bgf) # Prediction # First we choose a sample customer sample_customer", "# Inspect this customer's frequency, recency and T both for the calibration and", "real number of transactions in the observation period, which equals frequency_holdout + 1", "probability of alive customers alive_prob = bgf.conditional_probability_alive(frequency = sample_customer['frequency_cal'], recency = sample_customer['recency_cal'], T", "weeks (the length of the 
observation period) frequency = sample_customer['frequency_cal'], recency = sample_customer['recency_cal'],", "number of transactions in the given period n_transactions_pred = bgf.predict(t = 26, #", "weeks (lenght of the observation period) rfm_cal_holdout['n_transactions_holdout_pred'] = bgf.predict(t=26, frequency=rfm_cal_holdout['frequency_cal'], recency = rfm_cal_holdout['recency_cal'],", "sklearn.metrics import mean_squared_error RMSE = mean_squared_error(y_true = rfm_cal_holdout['n_transactions_holdout_real'], y_pred = rfm_cal_holdout['n_transactions_holdout_pred'], squared =", "T = rfm_cal_holdout['T_cal']) # Compare the real and predicted transactions rfm_cal_holdout[['n_transactions_holdout_real', 'n_transactions_holdout_pred']].head() #RMSE", "First we choose a sample customer sample_customer = rfm_cal_holdout.iloc[20] # Inspect this customer's", "period, which equals frequency_holdout + 1 rfm_cal_holdout['n_transactions_holdout_real'] = rfm_cal_holdout['frequency_holdout'] + 1 # Get", "compare the frequencies between our real calibration data and artificial data sampled from", "recency = sample_customer['recency_cal'], T = sample_customer['T_cal']) alive_prob # Compare the real and predicted", "predicted transactions rfm_cal_holdout[['n_transactions_holdout_real', 'n_transactions_holdout_pred']].head() #RMSE from sklearn.metrics import mean_squared_error RMSE = mean_squared_error(y_true =", "real and predicted transactions rfm_cal_holdout[['n_transactions_holdout_real', 'n_transactions_holdout_pred']].head() #RMSE from sklearn.metrics import mean_squared_error RMSE =", "# Fitting from lifetimes import BetaGeoFitter # instantiation of BG-NBD model bgf =", "plot_period_transactions _ = plot_period_transactions(bgf) # Prediction # First we choose a sample customer", "rfm_cal_holdout['frequency_cal'], recency = rfm_cal_holdout['recency_cal'], T = rfm_cal_holdout['T_cal']) bgf.summary # Assessment of model fit", "period n_transactions_pred = bgf.predict(t = 26, # set it to 26 weeks (the", "= rfm_cal_holdout.iloc[20] # Inspect this customer's frequency, recency and T both for the", "period) rfm_cal_holdout['n_transactions_holdout_pred'] = bgf.predict(t=26, frequency=rfm_cal_holdout['frequency_cal'], recency = rfm_cal_holdout['recency_cal'], T = rfm_cal_holdout['T_cal']) # Compare", "set it to 26 weeks (the length of the observation period) frequency =", "BetaGeoFitter # instantiation of BG-NBD model bgf = BetaGeoFitter(penalizer_coef = 0.0) # fitting", "transactions rfm_cal_holdout[['n_transactions_holdout_real', 'n_transactions_holdout_pred']].head() #RMSE from sklearn.metrics import mean_squared_error RMSE = mean_squared_error(y_true = rfm_cal_holdout['n_transactions_holdout_real'],", "distributions generated by the BG-NBD model from lifetimes.plotting import plot_period_transactions _ = plot_period_transactions(bgf)", "transactions in the observation period, which equals frequency_holdout + 1 rfm_cal_holdout['n_transactions_holdout_real'] = rfm_cal_holdout['frequency_holdout']", "# Compare the real and predicted transactions rfm_cal_holdout[['n_transactions_holdout_real', 'n_transactions_holdout_pred']].head() #RMSE from sklearn.metrics import", "model bgf = BetaGeoFitter(penalizer_coef = 0.0) # fitting of BG-NBD model bgf.fit(frequency =", "the distributions generated by the BG-NBD model from lifetimes.plotting import plot_period_transactions _ =", "# First is to compare the frequencies between our real calibration data and", "= sample_customer['recency_cal'], T = 
sample_customer['T_cal']) n_transactions_pred # Calculate the probability of alive customers", "= sample_customer['frequency_cal'], recency = sample_customer['recency_cal'], T = sample_customer['T_cal']) n_transactions_pred # Calculate the probability", "model fit # First is to compare the frequencies between our real calibration", "alive_prob # Compare the real and predicted number of transactions # Get the", "both for the calibration and observation periods sample_customer # Calculate the conditional expected", "given period n_transactions_pred = bgf.predict(t = 26, # set it to 26 weeks", "Inspect this customer's frequency, recency and T both for the calibration and observation", "the next 26 weeks (lenght of the observation period) rfm_cal_holdout['n_transactions_holdout_pred'] = bgf.predict(t=26, frequency=rfm_cal_holdout['frequency_cal'],", "bgf.predict(t = 26, # set it to 26 weeks (the length of the", "= 26, # set it to 26 weeks (the length of the observation", "Fitting from lifetimes import BetaGeoFitter # instantiation of BG-NBD model bgf = BetaGeoFitter(penalizer_coef", "the predicted number of transactions in the next 26 weeks (lenght of the", "and predicted transactions rfm_cal_holdout[['n_transactions_holdout_real', 'n_transactions_holdout_pred']].head() #RMSE from sklearn.metrics import mean_squared_error RMSE = mean_squared_error(y_true", "sample_customer['frequency_cal'], recency = sample_customer['recency_cal'], T = sample_customer['T_cal']) n_transactions_pred # Calculate the probability of", "bgf.conditional_probability_alive(frequency = sample_customer['frequency_cal'], recency = sample_customer['recency_cal'], T = sample_customer['T_cal']) alive_prob # Compare the", "= BetaGeoFitter(penalizer_coef = 0.0) # fitting of BG-NBD model bgf.fit(frequency = rfm_cal_holdout['frequency_cal'], recency", "from sklearn.metrics import mean_squared_error RMSE = mean_squared_error(y_true = rfm_cal_holdout['n_transactions_holdout_real'], y_pred = rfm_cal_holdout['n_transactions_holdout_pred'], squared", "from lifetimes.plotting import plot_period_transactions _ = plot_period_transactions(bgf) # Prediction # First we choose", "choose a sample customer sample_customer = rfm_cal_holdout.iloc[20] # Inspect this customer's frequency, recency", "of alive customers alive_prob = bgf.conditional_probability_alive(frequency = sample_customer['frequency_cal'], recency = sample_customer['recency_cal'], T =", "of model fit # First is to compare the frequencies between our real", "number of transactions in the next 26 weeks (lenght of the observation period)", "is to compare the frequencies between our real calibration data and artificial data", "# Get the predicted number of transactions in the next 26 weeks (lenght", "sample_customer['frequency_cal'], recency = sample_customer['recency_cal'], T = sample_customer['T_cal']) alive_prob # Compare the real and", "in the given period n_transactions_pred = bgf.predict(t = 26, # set it to", "bgf.predict(t=26, frequency=rfm_cal_holdout['frequency_cal'], recency = rfm_cal_holdout['recency_cal'], T = rfm_cal_holdout['T_cal']) # Compare the real and", "next 26 weeks (lenght of the observation period) rfm_cal_holdout['n_transactions_holdout_pred'] = bgf.predict(t=26, frequency=rfm_cal_holdout['frequency_cal'], recency", "the calibration and observation periods sample_customer # Calculate the conditional expected number of", "(the length of the observation period) frequency = sample_customer['frequency_cal'], recency = sample_customer['recency_cal'], T", "of 
BG-NBD model bgf.fit(frequency = rfm_cal_holdout['frequency_cal'], recency = rfm_cal_holdout['recency_cal'], T = rfm_cal_holdout['T_cal']) bgf.summary", "sample_customer # Calculate the conditional expected number of transactions in the given period", "= sample_customer['T_cal']) n_transactions_pred # Calculate the probability of alive customers alive_prob = bgf.conditional_probability_alive(frequency", "real and predicted number of transactions # Get the real number of transactions", "(lenght of the observation period) rfm_cal_holdout['n_transactions_holdout_pred'] = bgf.predict(t=26, frequency=rfm_cal_holdout['frequency_cal'], recency = rfm_cal_holdout['recency_cal'], T", "conditional expected number of transactions in the given period n_transactions_pred = bgf.predict(t =", "= bgf.predict(t = 26, # set it to 26 weeks (the length of", "our real calibration data and artificial data sampled from the distributions generated by", "observation period) frequency = sample_customer['frequency_cal'], recency = sample_customer['recency_cal'], T = sample_customer['T_cal']) n_transactions_pred #", "the observation period, which equals frequency_holdout + 1 rfm_cal_holdout['n_transactions_holdout_real'] = rfm_cal_holdout['frequency_holdout'] + 1", "equals frequency_holdout + 1 rfm_cal_holdout['n_transactions_holdout_real'] = rfm_cal_holdout['frequency_holdout'] + 1 # Get the predicted", "BetaGeoFitter(penalizer_coef = 0.0) # fitting of BG-NBD model bgf.fit(frequency = rfm_cal_holdout['frequency_cal'], recency =", "rfm_cal_holdout['n_transactions_holdout_pred'] = bgf.predict(t=26, frequency=rfm_cal_holdout['frequency_cal'], recency = rfm_cal_holdout['recency_cal'], T = rfm_cal_holdout['T_cal']) # Compare the", "= rfm_cal_holdout['recency_cal'], T = rfm_cal_holdout['T_cal']) # Compare the real and predicted transactions rfm_cal_holdout[['n_transactions_holdout_real',", "= rfm_cal_holdout['frequency_holdout'] + 1 # Get the predicted number of transactions in the", "rfm_cal_holdout['recency_cal'], T = rfm_cal_holdout['T_cal']) # Compare the real and predicted transactions rfm_cal_holdout[['n_transactions_holdout_real', 'n_transactions_holdout_pred']].head()", "customers alive_prob = bgf.conditional_probability_alive(frequency = sample_customer['frequency_cal'], recency = sample_customer['recency_cal'], T = sample_customer['T_cal']) alive_prob", "observation period, which equals frequency_holdout + 1 rfm_cal_holdout['n_transactions_holdout_real'] = rfm_cal_holdout['frequency_holdout'] + 1 #", "the observation period) rfm_cal_holdout['n_transactions_holdout_pred'] = bgf.predict(t=26, frequency=rfm_cal_holdout['frequency_cal'], recency = rfm_cal_holdout['recency_cal'], T = rfm_cal_holdout['T_cal'])", "= rfm_cal_holdout['recency_cal'], T = rfm_cal_holdout['T_cal']) bgf.summary # Assessment of model fit # First", "artificial data sampled from the distributions generated by the BG-NBD model from lifetimes.plotting", "frequency_holdout + 1 rfm_cal_holdout['n_transactions_holdout_real'] = rfm_cal_holdout['frequency_holdout'] + 1 # Get the predicted number", "recency = sample_customer['recency_cal'], T = sample_customer['T_cal']) n_transactions_pred # Calculate the probability of alive", "sample_customer['recency_cal'], T = sample_customer['T_cal']) n_transactions_pred # Calculate the probability of alive customers alive_prob", "# Prediction # First we choose a sample customer sample_customer = rfm_cal_holdout.iloc[20] #", "= plot_period_transactions(bgf) # Prediction # First we choose a sample customer 
sample_customer =", "Assessment of model fit # First is to compare the frequencies between our", "we choose a sample customer sample_customer = rfm_cal_holdout.iloc[20] # Inspect this customer's frequency,", "of transactions in the given period n_transactions_pred = bgf.predict(t = 26, # set", "it to 26 weeks (the length of the observation period) frequency = sample_customer['frequency_cal'],", "lifetimes import BetaGeoFitter # instantiation of BG-NBD model bgf = BetaGeoFitter(penalizer_coef = 0.0)", "rfm_cal_holdout['T_cal']) # Compare the real and predicted transactions rfm_cal_holdout[['n_transactions_holdout_real', 'n_transactions_holdout_pred']].head() #RMSE from sklearn.metrics", "rfm_cal_holdout.iloc[20] # Inspect this customer's frequency, recency and T both for the calibration", "Compare the real and predicted number of transactions # Get the real number", "T both for the calibration and observation periods sample_customer # Calculate the conditional", "frequency, recency and T both for the calibration and observation periods sample_customer #", "in the next 26 weeks (lenght of the observation period) rfm_cal_holdout['n_transactions_holdout_pred'] = bgf.predict(t=26,", "the BG-NBD model from lifetimes.plotting import plot_period_transactions _ = plot_period_transactions(bgf) # Prediction #", "bgf.summary # Assessment of model fit # First is to compare the frequencies", "the real and predicted number of transactions # Get the real number of", "rfm_cal_holdout[['n_transactions_holdout_real', 'n_transactions_holdout_pred']].head() #RMSE from sklearn.metrics import mean_squared_error RMSE = mean_squared_error(y_true = rfm_cal_holdout['n_transactions_holdout_real'], y_pred", "# Compare the real and predicted number of transactions # Get the real", "for the calibration and observation periods sample_customer # Calculate the conditional expected number", "frequencies between our real calibration data and artificial data sampled from the distributions", "0.0) # fitting of BG-NBD model bgf.fit(frequency = rfm_cal_holdout['frequency_cal'], recency = rfm_cal_holdout['recency_cal'], T", "Get the predicted number of transactions in the next 26 weeks (lenght of", "which equals frequency_holdout + 1 rfm_cal_holdout['n_transactions_holdout_real'] = rfm_cal_holdout['frequency_holdout'] + 1 # Get the", "recency and T both for the calibration and observation periods sample_customer # Calculate", "transactions in the given period n_transactions_pred = bgf.predict(t = 26, # set it", "Get the real number of transactions in the observation period, which equals frequency_holdout", "alive_prob = bgf.conditional_probability_alive(frequency = sample_customer['frequency_cal'], recency = sample_customer['recency_cal'], T = sample_customer['T_cal']) alive_prob #" ]
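The record above looks like overlapping shingles of a BG-NBD customer-lifetime-value walkthrough built on the lifetimes package (BetaGeoFitter fitted on a calibration period, holdout predictions, RMSE, probability-alive). A minimal sketch of that workflow follows; the original rfm_cal_holdout table is not part of this record, so the lifetimes demo transaction log is used in its place, and column names are assumptions taken from the shingles.

```python
# Sketch only: reconstructed from the shingles above; the original data is
# replaced by the lifetimes demo transaction log.
from lifetimes import BetaGeoFitter
from lifetimes.datasets import load_transaction_data
from lifetimes.utils import calibration_and_holdout_data
from lifetimes.plotting import plot_period_transactions
from sklearn.metrics import mean_squared_error

transactions = load_transaction_data()
rfm_cal_holdout = calibration_and_holdout_data(
    transactions, 'id', 'date',
    calibration_period_end='2014-09-01',
    observation_period_end='2014-12-31')

bgf = BetaGeoFitter(penalizer_coef=0.0)          # BG-NBD model
bgf.fit(rfm_cal_holdout['frequency_cal'],        # fit on the calibration period only
        rfm_cal_holdout['recency_cal'],
        rfm_cal_holdout['T_cal'])

# Predicted vs. actual repeat transactions over the holdout window
t_holdout = rfm_cal_holdout['duration_holdout'].iloc[0]
rfm_cal_holdout['n_pred'] = bgf.predict(t_holdout,
                                        rfm_cal_holdout['frequency_cal'],
                                        rfm_cal_holdout['recency_cal'],
                                        rfm_cal_holdout['T_cal'])
rmse = mean_squared_error(rfm_cal_holdout['frequency_holdout'],
                          rfm_cal_holdout['n_pred']) ** 0.5

# Probability that a single customer is still "alive", plus a visual fit check
cust = rfm_cal_holdout.iloc[20]
alive_prob = bgf.conditional_probability_alive(
    cust['frequency_cal'], cust['recency_cal'], cust['T_cal'])
_ = plot_period_transactions(bgf)
```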
[ "for python2 support try: from .blkdiscoveryutil import * except: from blkdiscoveryutil import *", "for path, diskdetails in parent.items(): if not diskdetails.get('type') == \"disk\": continue retval.append(path) return", "import * class LsBlk(BlkDiscoveryUtil): def disks(self): retval = [] parent = self.details() for", "name = child['name'] else: name = \"UNKNOWN\" children[name] = child retval['children'] = children", "self.details() for path, diskdetails in parent.items(): if not diskdetails.get('type') == \"disk\": continue retval.append(path)", "path, diskdetails in parent.items(): if not diskdetails.get('type') == \"disk\": continue retval.append(path) return retval", "== '__main__': import pprint pp = pprint.PrettyPrinter(indent=4) l = LsBlk() devdata = l.details()", "= child.get('name') retval[path] = child for disk, details in retval.items(): self.label_children(details) return self.stringify(retval)", "python2 support try: from .blkdiscoveryutil import * except: from blkdiscoveryutil import * class", "for disk, details in retval.items(): self.label_children(details) return self.stringify(retval) if __name__ == '__main__': import", "if not diskdetails.get('type') == \"disk\": continue retval.append(path) return retval def label_children(self,retval): if not", "disks(self): retval = [] parent = self.details() for path, diskdetails in parent.items(): if", "disk, details in retval.items(): self.label_children(details) return self.stringify(retval) if __name__ == '__main__': import pprint", "\"disk\": continue retval.append(path) return retval def label_children(self,retval): if not retval.get('children'): return children =", "return retval def label_children(self,retval): if not retval.get('children'): return children = {} for child", "from .blkdiscoveryutil import * except: from blkdiscoveryutil import * class LsBlk(BlkDiscoveryUtil): def disks(self):", "__name__ == '__main__': import pprint pp = pprint.PrettyPrinter(indent=4) l = LsBlk() devdata =", "rawoutput = self.subprocess_check_output([\"lsblk\", '--json', '-O', '-p']) parent = json.loads(rawoutput) for child in parent.get('blockdevices',[]):", "from blkdiscoveryutil import * class LsBlk(BlkDiscoveryUtil): def disks(self): retval = [] parent =", "in retval['children']: self.label_children(child) if child.get('name'): name = child['name'] else: name = \"UNKNOWN\" children[name]", "if not retval.get('children'): return children = {} for child in retval['children']: self.label_children(child) if", "in parent.get('blockdevices',[]): #print child['id'] + child['class'] path = child.get('name') retval[path] = child for", "== \"disk\": continue retval.append(path) return retval def label_children(self,retval): if not retval.get('children'): return children", "retval.get('children'): return children = {} for child in retval['children']: self.label_children(child) if child.get('name'): name", "retval = [] parent = self.details() for path, diskdetails in parent.items(): if not", "class LsBlk(BlkDiscoveryUtil): def disks(self): retval = [] parent = self.details() for path, diskdetails", "child['name'] else: name = \"UNKNOWN\" children[name] = child retval['children'] = children def details(self):", "self.subprocess_check_output([\"lsblk\", '--json', '-O', '-p']) parent = json.loads(rawoutput) for child in parent.get('blockdevices',[]): #print child['id']", "child retval['children'] = children def details(self): retval = {} rawoutput = self.subprocess_check_output([\"lsblk\", '--json',", "{} for child in retval['children']: 
self.label_children(child) if child.get('name'): name = child['name'] else: name", "continue retval.append(path) return retval def label_children(self,retval): if not retval.get('children'): return children = {}", "child for disk, details in retval.items(): self.label_children(details) return self.stringify(retval) if __name__ == '__main__':", "self.stringify(retval) if __name__ == '__main__': import pprint pp = pprint.PrettyPrinter(indent=4) l = LsBlk()", "diskdetails.get('type') == \"disk\": continue retval.append(path) return retval def label_children(self,retval): if not retval.get('children'): return", "child.get('name'): name = child['name'] else: name = \"UNKNOWN\" children[name] = child retval['children'] =", "return children = {} for child in retval['children']: self.label_children(child) if child.get('name'): name =", "parent = json.loads(rawoutput) for child in parent.get('blockdevices',[]): #print child['id'] + child['class'] path =", "diskdetails in parent.items(): if not diskdetails.get('type') == \"disk\": continue retval.append(path) return retval def", "details(self): retval = {} rawoutput = self.subprocess_check_output([\"lsblk\", '--json', '-O', '-p']) parent = json.loads(rawoutput)", "= {} rawoutput = self.subprocess_check_output([\"lsblk\", '--json', '-O', '-p']) parent = json.loads(rawoutput) for child", "'__main__': import pprint pp = pprint.PrettyPrinter(indent=4) l = LsBlk() devdata = l.details() pp.pprint(devdata)", "= pprint.PrettyPrinter(indent=4) l = LsBlk() devdata = l.details() pp.pprint(devdata) disks = l.disks() pp.pprint(disks)", "not retval.get('children'): return children = {} for child in retval['children']: self.label_children(child) if child.get('name'):", "import json #hack for python2 support try: from .blkdiscoveryutil import * except: from", "if __name__ == '__main__': import pprint pp = pprint.PrettyPrinter(indent=4) l = LsBlk() devdata", "children[name] = child retval['children'] = children def details(self): retval = {} rawoutput =", "except: from blkdiscoveryutil import * class LsBlk(BlkDiscoveryUtil): def disks(self): retval = [] parent", "retval = {} rawoutput = self.subprocess_check_output([\"lsblk\", '--json', '-O', '-p']) parent = json.loads(rawoutput) for", "self.label_children(child) if child.get('name'): name = child['name'] else: name = \"UNKNOWN\" children[name] = child", "= child['name'] else: name = \"UNKNOWN\" children[name] = child retval['children'] = children def", "for child in retval['children']: self.label_children(child) if child.get('name'): name = child['name'] else: name =", "import pprint pp = pprint.PrettyPrinter(indent=4) l = LsBlk() devdata = l.details() pp.pprint(devdata) disks", "children = {} for child in retval['children']: self.label_children(child) if child.get('name'): name = child['name']", "import * except: from blkdiscoveryutil import * class LsBlk(BlkDiscoveryUtil): def disks(self): retval =", "else: name = \"UNKNOWN\" children[name] = child retval['children'] = children def details(self): retval", "retval['children']: self.label_children(child) if child.get('name'): name = child['name'] else: name = \"UNKNOWN\" children[name] =", "LsBlk(BlkDiscoveryUtil): def disks(self): retval = [] parent = self.details() for path, diskdetails in", "child in parent.get('blockdevices',[]): #print child['id'] + child['class'] path = child.get('name') retval[path] = child", "self.label_children(details) return self.stringify(retval) if __name__ == '__main__': import pprint pp = pprint.PrettyPrinter(indent=4) l", "= [] 
parent = self.details() for path, diskdetails in parent.items(): if not diskdetails.get('type')", "in parent.items(): if not diskdetails.get('type') == \"disk\": continue retval.append(path) return retval def label_children(self,retval):", ".blkdiscoveryutil import * except: from blkdiscoveryutil import * class LsBlk(BlkDiscoveryUtil): def disks(self): retval", "parent.items(): if not diskdetails.get('type') == \"disk\": continue retval.append(path) return retval def label_children(self,retval): if", "retval.items(): self.label_children(details) return self.stringify(retval) if __name__ == '__main__': import pprint pp = pprint.PrettyPrinter(indent=4)", "not diskdetails.get('type') == \"disk\": continue retval.append(path) return retval def label_children(self,retval): if not retval.get('children'):", "details in retval.items(): self.label_children(details) return self.stringify(retval) if __name__ == '__main__': import pprint pp", "json #hack for python2 support try: from .blkdiscoveryutil import * except: from blkdiscoveryutil", "parent.get('blockdevices',[]): #print child['id'] + child['class'] path = child.get('name') retval[path] = child for disk,", "pprint pp = pprint.PrettyPrinter(indent=4) l = LsBlk() devdata = l.details() pp.pprint(devdata) disks =", "support try: from .blkdiscoveryutil import * except: from blkdiscoveryutil import * class LsBlk(BlkDiscoveryUtil):", "#print child['id'] + child['class'] path = child.get('name') retval[path] = child for disk, details", "= self.details() for path, diskdetails in parent.items(): if not diskdetails.get('type') == \"disk\": continue", "retval.append(path) return retval def label_children(self,retval): if not retval.get('children'): return children = {} for", "= children def details(self): retval = {} rawoutput = self.subprocess_check_output([\"lsblk\", '--json', '-O', '-p'])", "'--json', '-O', '-p']) parent = json.loads(rawoutput) for child in parent.get('blockdevices',[]): #print child['id'] +", "def disks(self): retval = [] parent = self.details() for path, diskdetails in parent.items():", "in retval.items(): self.label_children(details) return self.stringify(retval) if __name__ == '__main__': import pprint pp =", "child in retval['children']: self.label_children(child) if child.get('name'): name = child['name'] else: name = \"UNKNOWN\"", "{} rawoutput = self.subprocess_check_output([\"lsblk\", '--json', '-O', '-p']) parent = json.loads(rawoutput) for child in", "= json.loads(rawoutput) for child in parent.get('blockdevices',[]): #print child['id'] + child['class'] path = child.get('name')", "def details(self): retval = {} rawoutput = self.subprocess_check_output([\"lsblk\", '--json', '-O', '-p']) parent =", "+ child['class'] path = child.get('name') retval[path] = child for disk, details in retval.items():", "path = child.get('name') retval[path] = child for disk, details in retval.items(): self.label_children(details) return", "'-O', '-p']) parent = json.loads(rawoutput) for child in parent.get('blockdevices',[]): #print child['id'] + child['class']", "[] parent = self.details() for path, diskdetails in parent.items(): if not diskdetails.get('type') ==", "pp = pprint.PrettyPrinter(indent=4) l = LsBlk() devdata = l.details() pp.pprint(devdata) disks = l.disks()", "def label_children(self,retval): if not retval.get('children'): return children = {} for child in retval['children']:", "child.get('name') retval[path] = child for disk, details in retval.items(): self.label_children(details) return self.stringify(retval) if", "retval[path] 
= child for disk, details in retval.items(): self.label_children(details) return self.stringify(retval) if __name__", "parent = self.details() for path, diskdetails in parent.items(): if not diskdetails.get('type') == \"disk\":", "retval['children'] = children def details(self): retval = {} rawoutput = self.subprocess_check_output([\"lsblk\", '--json', '-O',", "children def details(self): retval = {} rawoutput = self.subprocess_check_output([\"lsblk\", '--json', '-O', '-p']) parent", "retval def label_children(self,retval): if not retval.get('children'): return children = {} for child in", "= child for disk, details in retval.items(): self.label_children(details) return self.stringify(retval) if __name__ ==", "#hack for python2 support try: from .blkdiscoveryutil import * except: from blkdiscoveryutil import", "* except: from blkdiscoveryutil import * class LsBlk(BlkDiscoveryUtil): def disks(self): retval = []", "if child.get('name'): name = child['name'] else: name = \"UNKNOWN\" children[name] = child retval['children']", "= \"UNKNOWN\" children[name] = child retval['children'] = children def details(self): retval = {}", "child['id'] + child['class'] path = child.get('name') retval[path] = child for disk, details in", "blkdiscoveryutil import * class LsBlk(BlkDiscoveryUtil): def disks(self): retval = [] parent = self.details()", "for child in parent.get('blockdevices',[]): #print child['id'] + child['class'] path = child.get('name') retval[path] =", "try: from .blkdiscoveryutil import * except: from blkdiscoveryutil import * class LsBlk(BlkDiscoveryUtil): def", "\"UNKNOWN\" children[name] = child retval['children'] = children def details(self): retval = {} rawoutput", "json.loads(rawoutput) for child in parent.get('blockdevices',[]): #print child['id'] + child['class'] path = child.get('name') retval[path]", "name = \"UNKNOWN\" children[name] = child retval['children'] = children def details(self): retval =", "return self.stringify(retval) if __name__ == '__main__': import pprint pp = pprint.PrettyPrinter(indent=4) l =", "'-p']) parent = json.loads(rawoutput) for child in parent.get('blockdevices',[]): #print child['id'] + child['class'] path", "= child retval['children'] = children def details(self): retval = {} rawoutput = self.subprocess_check_output([\"lsblk\",", "child['class'] path = child.get('name') retval[path] = child for disk, details in retval.items(): self.label_children(details)", "= {} for child in retval['children']: self.label_children(child) if child.get('name'): name = child['name'] else:", "label_children(self,retval): if not retval.get('children'): return children = {} for child in retval['children']: self.label_children(child)", "= self.subprocess_check_output([\"lsblk\", '--json', '-O', '-p']) parent = json.loads(rawoutput) for child in parent.get('blockdevices',[]): #print", "* class LsBlk(BlkDiscoveryUtil): def disks(self): retval = [] parent = self.details() for path," ]
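This record appears to shred a small lsblk discovery helper (class LsBlk(BlkDiscoveryUtil)) that shells out to `lsblk --json -O -p` and keeps only whole disks. The BlkDiscoveryUtil base class is not visible here, so the sketch below uses plain subprocess/json; the function names are placeholders, not the original API.

```python
# Sketch only: same idea as the shredded LsBlk helper, without its base class.
import json
import subprocess

def lsblk_details():
    """Parse `lsblk --json -O -p` and key the top-level devices by path."""
    raw = subprocess.check_output(["lsblk", "--json", "-O", "-p"])
    tree = json.loads(raw)
    return {dev.get("name"): dev for dev in tree.get("blockdevices", [])}

def disks(details):
    """Keep only whole disks (type == "disk"), dropping partitions and loops."""
    return [path for path, dev in details.items() if dev.get("type") == "disk"]

if __name__ == "__main__":
    devices = lsblk_details()
    print(disks(devices))
```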
[ "tf.minimum(tf.constant(n_outputs-1, dtype=tf.float32,shape=[1,action_dim]), tf.maximum(tf.constant(0, dtype=tf.float32,shape=[1,action_dim]), tf.to_float(discretization))) return tf.to_int32(discretization) if __name__=='__main__': value=np.array((0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9)) a=discretize(value,value.shape[0],2) with tf.Session()", "def discretize(value,action_dim,n_outputs): discretization = tf.round(value) discretization = tf.minimum(tf.constant(n_outputs-1, dtype=tf.float32,shape=[1,action_dim]), tf.maximum(tf.constant(0, dtype=tf.float32,shape=[1,action_dim]), tf.to_float(discretization))) return", "discretization = tf.minimum(tf.constant(n_outputs-1, dtype=tf.float32,shape=[1,action_dim]), tf.maximum(tf.constant(0, dtype=tf.float32,shape=[1,action_dim]), tf.to_float(discretization))) return tf.to_int32(discretization) if __name__=='__main__': value=np.array((0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9)) a=discretize(value,value.shape[0],2)", "discretization = tf.round(value) discretization = tf.minimum(tf.constant(n_outputs-1, dtype=tf.float32,shape=[1,action_dim]), tf.maximum(tf.constant(0, dtype=tf.float32,shape=[1,action_dim]), tf.to_float(discretization))) return tf.to_int32(discretization) if", "numpy as np import tensorflow as tf def discretize(value,action_dim,n_outputs): discretization = tf.round(value) discretization", "= tf.minimum(tf.constant(n_outputs-1, dtype=tf.float32,shape=[1,action_dim]), tf.maximum(tf.constant(0, dtype=tf.float32,shape=[1,action_dim]), tf.to_float(discretization))) return tf.to_int32(discretization) if __name__=='__main__': value=np.array((0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9)) a=discretize(value,value.shape[0],2) with", "as tf def discretize(value,action_dim,n_outputs): discretization = tf.round(value) discretization = tf.minimum(tf.constant(n_outputs-1, dtype=tf.float32,shape=[1,action_dim]), tf.maximum(tf.constant(0, dtype=tf.float32,shape=[1,action_dim]),", "dtype=tf.float32,shape=[1,action_dim]), tf.to_float(discretization))) return tf.to_int32(discretization) if __name__=='__main__': value=np.array((0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9)) a=discretize(value,value.shape[0],2) with tf.Session() as sess: print(a.eval())", "as np import tensorflow as tf def discretize(value,action_dim,n_outputs): discretization = tf.round(value) discretization =", "import numpy as np import tensorflow as tf def discretize(value,action_dim,n_outputs): discretization = tf.round(value)", "tf.maximum(tf.constant(0, dtype=tf.float32,shape=[1,action_dim]), tf.to_float(discretization))) return tf.to_int32(discretization) if __name__=='__main__': value=np.array((0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9)) a=discretize(value,value.shape[0],2) with tf.Session() as sess:", "= tf.round(value) discretization = tf.minimum(tf.constant(n_outputs-1, dtype=tf.float32,shape=[1,action_dim]), tf.maximum(tf.constant(0, dtype=tf.float32,shape=[1,action_dim]), tf.to_float(discretization))) return tf.to_int32(discretization) if __name__=='__main__':", "tensorflow as tf def discretize(value,action_dim,n_outputs): discretization = tf.round(value) discretization = tf.minimum(tf.constant(n_outputs-1, dtype=tf.float32,shape=[1,action_dim]), tf.maximum(tf.constant(0,", "discretize(value,action_dim,n_outputs): discretization = tf.round(value) discretization = tf.minimum(tf.constant(n_outputs-1, dtype=tf.float32,shape=[1,action_dim]), tf.maximum(tf.constant(0, dtype=tf.float32,shape=[1,action_dim]), tf.to_float(discretization))) return tf.to_int32(discretization)", "import tensorflow as tf 
def discretize(value,action_dim,n_outputs): discretization = tf.round(value) discretization = tf.minimum(tf.constant(n_outputs-1, dtype=tf.float32,shape=[1,action_dim]),", "tf def discretize(value,action_dim,n_outputs): discretization = tf.round(value) discretization = tf.minimum(tf.constant(n_outputs-1, dtype=tf.float32,shape=[1,action_dim]), tf.maximum(tf.constant(0, dtype=tf.float32,shape=[1,action_dim]), tf.to_float(discretization)))", "dtype=tf.float32,shape=[1,action_dim]), tf.maximum(tf.constant(0, dtype=tf.float32,shape=[1,action_dim]), tf.to_float(discretization))) return tf.to_int32(discretization) if __name__=='__main__': value=np.array((0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9)) a=discretize(value,value.shape[0],2) with tf.Session() as", "np import tensorflow as tf def discretize(value,action_dim,n_outputs): discretization = tf.round(value) discretization = tf.minimum(tf.constant(n_outputs-1,", "tf.round(value) discretization = tf.minimum(tf.constant(n_outputs-1, dtype=tf.float32,shape=[1,action_dim]), tf.maximum(tf.constant(0, dtype=tf.float32,shape=[1,action_dim]), tf.to_float(discretization))) return tf.to_int32(discretization) if __name__=='__main__': value=np.array((0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9))" ]
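The shingles above reassemble almost verbatim into a short TensorFlow 1.x action-discretization helper (tf.to_float, tf.to_int32 and tf.Session only exist in the 1.x API). Reconstructed here for readability:

```python
# Reconstruction of the TF 1.x snippet encoded in the record above.
import numpy as np
import tensorflow as tf

def discretize(value, action_dim, n_outputs):
    # Round each component, then clamp it into the valid bin range [0, n_outputs - 1].
    discretization = tf.round(value)
    discretization = tf.minimum(
        tf.constant(n_outputs - 1, dtype=tf.float32, shape=[1, action_dim]),
        tf.maximum(tf.constant(0, dtype=tf.float32, shape=[1, action_dim]),
                   tf.to_float(discretization)))
    return tf.to_int32(discretization)

if __name__ == '__main__':
    value = np.array((0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9))
    a = discretize(value, value.shape[0], 2)
    with tf.Session() as sess:
        print(a.eval())
```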
[ "тестовой группы\", ) def setUp(self): self.post = PostModelTest.post self.group = PostModelTest.group def test_verbose_name_post(self):", "Group.objects.create( title = \"Название тестовой группы\", slug = \"test-slug\", description = \"Описание тестовой", "дефисы и знаки ' 'подчёркивания'), } for value, expected in field_help_text.items(): with self.subTest(value=value):", "} for value, expected in field_verboses.items(): with self.subTest(value=value): self.assertEqual(self.post._meta.get_field(value).verbose_name, expected) def test_verbose_name_group(self): \"\"\"Checks", "TestCase from django.contrib.auth import get_user_model from ..models import Post, Group User = get_user_model();", "def setUpClass(cls): super().setUpClass() cls.user = User.objects.create(username = \"Юзер\") cls.post = Post.objects.create( title=\"Заголовок тестовой", "def test_verbose_name_post(self): \"\"\"Checks verbose names for post\"\"\" field_verboses = { \"title\": \"Название статьи\",", "<reponame>MariiaBel/developmentHub from django.test import TestCase from django.contrib.auth import get_user_model from ..models import Post,", "self.post.text[:15] self.assertEqual(expected_str, str(self.post)) def test_str_group(self): \"\"\"Checks __str__ for group\"\"\" expected_str = self.group.title self.assertEqual(expected_str,", "title = \"Название тестовой группы\", slug = \"test-slug\", description = \"Описание тестовой группы\",", "знаки ' 'подчёркивания'), } for value, expected in field_help_text.items(): with self.subTest(value=value): self.assertEqual(self.group._meta.get_field(value).help_text, expected)", "self.subTest(value=value): self.assertEqual(self.post._meta.get_field(value).verbose_name, expected) def test_verbose_name_group(self): \"\"\"Checks verbose names for group\"\"\" field_verboses = {", "\"Название статьи\", \"text\": \"Текст статьи\", \"pub_date\": \"Дата публикации\", \"group\": \"Название группы\", \"author\": \"Автор", "expected) def test_str_post(self): \"\"\"Checks __str__ for post\"\"\" expected_str = self.post.text[:15] self.assertEqual(expected_str, str(self.post)) def", "= User.objects.create(username = \"Юзер\") cls.post = Post.objects.create( title=\"Заголовок тестовой статьи\", text=\"Текст тестовой статьи\",", "\"Название группы\", \"author\": \"Автор статьи\", } for value, expected in field_verboses.items(): with self.subTest(value=value):", "test_help_text_post(self): \"\"\"Checks help text for post\"\"\" field_help_text = { \"title\": \"Дайте название статье\",", "for post\"\"\" expected_str = self.post.text[:15] self.assertEqual(expected_str, str(self.post)) def test_str_group(self): \"\"\"Checks __str__ for group\"\"\"", "def test_verbose_name_group(self): \"\"\"Checks verbose names for group\"\"\" field_verboses = { \"title\": \"Название группы\",", "for group\"\"\" field_help_text = { \"title\": \"Дайте назание группе\", \"slug\": ('Укажите адрес для", "\"group\": \"Укажите группу для статьи\", } for value, expected in field_help_text.items(): with self.subTest(value=value):", "\"Слаг\", \"description\": \"Описание группы\", } for value, expected in field_verboses.items(): with self.subTest(value=value): self.assertEqual(self.group._meta.get_field(value).verbose_name,", "{ \"title\": \"Дайте название статье\", \"group\": \"Укажите группу для статьи\", } for value,", "expected) def test_help_text_post(self): \"\"\"Checks help text for post\"\"\" field_help_text = { \"title\": \"Дайте", "def test_str_post(self): \"\"\"Checks __str__ for post\"\"\" expected_str = 
self.post.text[:15] self.assertEqual(expected_str, str(self.post)) def test_str_group(self):", "статьи\", text=\"Текст тестовой статьи\", author = cls.user, ) cls.group = Group.objects.create( title =", "статьи\", \"text\": \"Текст статьи\", \"pub_date\": \"Дата публикации\", \"group\": \"Название группы\", \"author\": \"Автор статьи\",", "\"title\": \"Дайте название статье\", \"group\": \"Укажите группу для статьи\", } for value, expected", "для статьи\", } for value, expected in field_help_text.items(): with self.subTest(value=value): self.assertEqual(self.post._meta.get_field(value).help_text, expected) def", "with self.subTest(value=value): self.assertEqual(self.group._meta.get_field(value).help_text, expected) def test_str_post(self): \"\"\"Checks __str__ for post\"\"\" expected_str = self.post.text[:15]", "назание группе\", \"slug\": ('Укажите адрес для группы. Используйте ' 'только латиницу, цифры, дефисы", "from django.contrib.auth import get_user_model from ..models import Post, Group User = get_user_model(); class", "публикации\", \"group\": \"Название группы\", \"author\": \"Автор статьи\", } for value, expected in field_verboses.items():", "\"Укажите группу для статьи\", } for value, expected in field_help_text.items(): with self.subTest(value=value): self.assertEqual(self.post._meta.get_field(value).help_text,", "'подчёркивания'), } for value, expected in field_help_text.items(): with self.subTest(value=value): self.assertEqual(self.group._meta.get_field(value).help_text, expected) def test_str_post(self):", "self.assertEqual(self.post._meta.get_field(value).verbose_name, expected) def test_verbose_name_group(self): \"\"\"Checks verbose names for group\"\"\" field_verboses = { \"title\":", "группы\", \"author\": \"Автор статьи\", } for value, expected in field_verboses.items(): with self.subTest(value=value): self.assertEqual(self.post._meta.get_field(value).verbose_name,", "cls.user = User.objects.create(username = \"Юзер\") cls.post = Post.objects.create( title=\"Заголовок тестовой статьи\", text=\"Текст тестовой", "expected) def test_verbose_name_group(self): \"\"\"Checks verbose names for group\"\"\" field_verboses = { \"title\": \"Название", "PostModelTest.group def test_verbose_name_post(self): \"\"\"Checks verbose names for post\"\"\" field_verboses = { \"title\": \"Название", "in field_help_text.items(): with self.subTest(value=value): self.assertEqual(self.post._meta.get_field(value).help_text, expected) def test_help_text_group(self): \"\"\"Checks help text for group\"\"\"", "field_verboses.items(): with self.subTest(value=value): self.assertEqual(self.group._meta.get_field(value).verbose_name, expected) def test_help_text_post(self): \"\"\"Checks help text for post\"\"\" field_help_text", "User = get_user_model(); class PostModelTest(TestCase): @classmethod def setUpClass(cls): super().setUpClass() cls.user = User.objects.create(username =", "= \"Название тестовой группы\", slug = \"test-slug\", description = \"Описание тестовой группы\", )", "slug = \"test-slug\", description = \"Описание тестовой группы\", ) def setUp(self): self.post =", "цифры, дефисы и знаки ' 'подчёркивания'), } for value, expected in field_help_text.items(): with", "PostModelTest(TestCase): @classmethod def setUpClass(cls): super().setUpClass() cls.user = User.objects.create(username = \"Юзер\") cls.post = Post.objects.create(", "title=\"Заголовок тестовой статьи\", text=\"Текст тестовой статьи\", author = cls.user, ) cls.group = Group.objects.create(", "field_verboses = { \"title\": 
\"Название статьи\", \"text\": \"Текст статьи\", \"pub_date\": \"Дата публикации\", \"group\":", "= cls.user, ) cls.group = Group.objects.create( title = \"Название тестовой группы\", slug =", "статьи\", \"pub_date\": \"Дата публикации\", \"group\": \"Название группы\", \"author\": \"Автор статьи\", } for value,", "= \"Описание тестовой группы\", ) def setUp(self): self.post = PostModelTest.post self.group = PostModelTest.group", "= get_user_model(); class PostModelTest(TestCase): @classmethod def setUpClass(cls): super().setUpClass() cls.user = User.objects.create(username = \"Юзер\")", "\"title\": \"Дайте назание группе\", \"slug\": ('Укажите адрес для группы. Используйте ' 'только латиницу,", "for value, expected in field_verboses.items(): with self.subTest(value=value): self.assertEqual(self.post._meta.get_field(value).verbose_name, expected) def test_verbose_name_group(self): \"\"\"Checks verbose", "\"Дайте назание группе\", \"slug\": ('Укажите адрес для группы. Используйте ' 'только латиницу, цифры,", "= \"Юзер\") cls.post = Post.objects.create( title=\"Заголовок тестовой статьи\", text=\"Текст тестовой статьи\", author =", "setUpClass(cls): super().setUpClass() cls.user = User.objects.create(username = \"Юзер\") cls.post = Post.objects.create( title=\"Заголовок тестовой статьи\",", "тестовой статьи\", text=\"Текст тестовой статьи\", author = cls.user, ) cls.group = Group.objects.create( title", "\"\"\"Checks help text for post\"\"\" field_help_text = { \"title\": \"Дайте название статье\", \"group\":", "User.objects.create(username = \"Юзер\") cls.post = Post.objects.create( title=\"Заголовок тестовой статьи\", text=\"Текст тестовой статьи\", author", "in field_verboses.items(): with self.subTest(value=value): self.assertEqual(self.group._meta.get_field(value).verbose_name, expected) def test_help_text_post(self): \"\"\"Checks help text for post\"\"\"", "from ..models import Post, Group User = get_user_model(); class PostModelTest(TestCase): @classmethod def setUpClass(cls):", "' 'только латиницу, цифры, дефисы и знаки ' 'подчёркивания'), } for value, expected", "и знаки ' 'подчёркивания'), } for value, expected in field_help_text.items(): with self.subTest(value=value): self.assertEqual(self.group._meta.get_field(value).help_text,", "группе\", \"slug\": ('Укажите адрес для группы. 
Используйте ' 'только латиницу, цифры, дефисы и", "expected in field_help_text.items(): with self.subTest(value=value): self.assertEqual(self.group._meta.get_field(value).help_text, expected) def test_str_post(self): \"\"\"Checks __str__ for post\"\"\"", "\"Дата публикации\", \"group\": \"Название группы\", \"author\": \"Автор статьи\", } for value, expected in", "= Group.objects.create( title = \"Название тестовой группы\", slug = \"test-slug\", description = \"Описание", "test_str_post(self): \"\"\"Checks __str__ for post\"\"\" expected_str = self.post.text[:15] self.assertEqual(expected_str, str(self.post)) def test_str_group(self): \"\"\"Checks", "..models import Post, Group User = get_user_model(); class PostModelTest(TestCase): @classmethod def setUpClass(cls): super().setUpClass()", "self.assertEqual(self.group._meta.get_field(value).verbose_name, expected) def test_help_text_post(self): \"\"\"Checks help text for post\"\"\" field_help_text = { \"title\":", "verbose names for post\"\"\" field_verboses = { \"title\": \"Название статьи\", \"text\": \"Текст статьи\",", "{ \"title\": \"Название статьи\", \"text\": \"Текст статьи\", \"pub_date\": \"Дата публикации\", \"group\": \"Название группы\",", "test_verbose_name_group(self): \"\"\"Checks verbose names for group\"\"\" field_verboses = { \"title\": \"Название группы\", \"slug\":", "field_help_text = { \"title\": \"Дайте название статье\", \"group\": \"Укажите группу для статьи\", }", "{ \"title\": \"Название группы\", \"slug\": \"Слаг\", \"description\": \"Описание группы\", } for value, expected", "self.subTest(value=value): self.assertEqual(self.post._meta.get_field(value).help_text, expected) def test_help_text_group(self): \"\"\"Checks help text for group\"\"\" field_help_text = {", "author = cls.user, ) cls.group = Group.objects.create( title = \"Название тестовой группы\", slug", "django.test import TestCase from django.contrib.auth import get_user_model from ..models import Post, Group User", "= { \"title\": \"Название статьи\", \"text\": \"Текст статьи\", \"pub_date\": \"Дата публикации\", \"group\": \"Название", "= { \"title\": \"Название группы\", \"slug\": \"Слаг\", \"description\": \"Описание группы\", } for value,", "post\"\"\" field_help_text = { \"title\": \"Дайте название статье\", \"group\": \"Укажите группу для статьи\",", "\"title\": \"Название статьи\", \"text\": \"Текст статьи\", \"pub_date\": \"Дата публикации\", \"group\": \"Название группы\", \"author\":", "post\"\"\" field_verboses = { \"title\": \"Название статьи\", \"text\": \"Текст статьи\", \"pub_date\": \"Дата публикации\",", "\"Название группы\", \"slug\": \"Слаг\", \"description\": \"Описание группы\", } for value, expected in field_verboses.items():", "value, expected in field_help_text.items(): with self.subTest(value=value): self.assertEqual(self.group._meta.get_field(value).help_text, expected) def test_str_post(self): \"\"\"Checks __str__ for", "__str__ for post\"\"\" expected_str = self.post.text[:15] self.assertEqual(expected_str, str(self.post)) def test_str_group(self): \"\"\"Checks __str__ for", "Post, Group User = get_user_model(); class PostModelTest(TestCase): @classmethod def setUpClass(cls): super().setUpClass() cls.user =", "field_help_text.items(): with self.subTest(value=value): self.assertEqual(self.post._meta.get_field(value).help_text, expected) def test_help_text_group(self): \"\"\"Checks help text for group\"\"\" field_help_text", "def test_help_text_post(self): \"\"\"Checks help text for post\"\"\" field_help_text = { 
\"title\": \"Дайте название", "expected in field_help_text.items(): with self.subTest(value=value): self.assertEqual(self.post._meta.get_field(value).help_text, expected) def test_help_text_group(self): \"\"\"Checks help text for", "= Post.objects.create( title=\"Заголовок тестовой статьи\", text=\"Текст тестовой статьи\", author = cls.user, ) cls.group", ") def setUp(self): self.post = PostModelTest.post self.group = PostModelTest.group def test_verbose_name_post(self): \"\"\"Checks verbose", "value, expected in field_verboses.items(): with self.subTest(value=value): self.assertEqual(self.post._meta.get_field(value).verbose_name, expected) def test_verbose_name_group(self): \"\"\"Checks verbose names", "expected in field_verboses.items(): with self.subTest(value=value): self.assertEqual(self.post._meta.get_field(value).verbose_name, expected) def test_verbose_name_group(self): \"\"\"Checks verbose names for", "with self.subTest(value=value): self.assertEqual(self.post._meta.get_field(value).help_text, expected) def test_help_text_group(self): \"\"\"Checks help text for group\"\"\" field_help_text =", "class PostModelTest(TestCase): @classmethod def setUpClass(cls): super().setUpClass() cls.user = User.objects.create(username = \"Юзер\") cls.post =", "группы. Используйте ' 'только латиницу, цифры, дефисы и знаки ' 'подчёркивания'), } for", "\"title\": \"Название группы\", \"slug\": \"Слаг\", \"description\": \"Описание группы\", } for value, expected in", "def setUp(self): self.post = PostModelTest.post self.group = PostModelTest.group def test_verbose_name_post(self): \"\"\"Checks verbose names", "super().setUpClass() cls.user = User.objects.create(username = \"Юзер\") cls.post = Post.objects.create( title=\"Заголовок тестовой статьи\", text=\"Текст", "import get_user_model from ..models import Post, Group User = get_user_model(); class PostModelTest(TestCase): @classmethod", "self.subTest(value=value): self.assertEqual(self.group._meta.get_field(value).verbose_name, expected) def test_help_text_post(self): \"\"\"Checks help text for post\"\"\" field_help_text = {", "value, expected in field_help_text.items(): with self.subTest(value=value): self.assertEqual(self.post._meta.get_field(value).help_text, expected) def test_help_text_group(self): \"\"\"Checks help text", "группу для статьи\", } for value, expected in field_help_text.items(): with self.subTest(value=value): self.assertEqual(self.post._meta.get_field(value).help_text, expected)", "\"author\": \"Автор статьи\", } for value, expected in field_verboses.items(): with self.subTest(value=value): self.assertEqual(self.post._meta.get_field(value).verbose_name, expected)", "expected) def test_help_text_group(self): \"\"\"Checks help text for group\"\"\" field_help_text = { \"title\": \"Дайте", "\"group\": \"Название группы\", \"author\": \"Автор статьи\", } for value, expected in field_verboses.items(): with", "тестовой группы\", slug = \"test-slug\", description = \"Описание тестовой группы\", ) def setUp(self):", "\"test-slug\", description = \"Описание тестовой группы\", ) def setUp(self): self.post = PostModelTest.post self.group", "\"slug\": \"Слаг\", \"description\": \"Описание группы\", } for value, expected in field_verboses.items(): with self.subTest(value=value):", "for value, expected in field_help_text.items(): with self.subTest(value=value): self.assertEqual(self.group._meta.get_field(value).help_text, expected) def test_str_post(self): \"\"\"Checks __str__", "= self.post.text[:15] self.assertEqual(expected_str, 
str(self.post)) def test_str_group(self): \"\"\"Checks __str__ for group\"\"\" expected_str = self.group.title", "import TestCase from django.contrib.auth import get_user_model from ..models import Post, Group User =", "for post\"\"\" field_help_text = { \"title\": \"Дайте название статье\", \"group\": \"Укажите группу для", "self.group = PostModelTest.group def test_verbose_name_post(self): \"\"\"Checks verbose names for post\"\"\" field_verboses = {", "\"Текст статьи\", \"pub_date\": \"Дата публикации\", \"group\": \"Название группы\", \"author\": \"Автор статьи\", } for", "self.assertEqual(self.post._meta.get_field(value).help_text, expected) def test_help_text_group(self): \"\"\"Checks help text for group\"\"\" field_help_text = { \"title\":", "django.contrib.auth import get_user_model from ..models import Post, Group User = get_user_model(); class PostModelTest(TestCase):", "from django.test import TestCase from django.contrib.auth import get_user_model from ..models import Post, Group", "for value, expected in field_verboses.items(): with self.subTest(value=value): self.assertEqual(self.group._meta.get_field(value).verbose_name, expected) def test_help_text_post(self): \"\"\"Checks help", "names for group\"\"\" field_verboses = { \"title\": \"Название группы\", \"slug\": \"Слаг\", \"description\": \"Описание", "self.subTest(value=value): self.assertEqual(self.group._meta.get_field(value).help_text, expected) def test_str_post(self): \"\"\"Checks __str__ for post\"\"\" expected_str = self.post.text[:15] self.assertEqual(expected_str,", "\"text\": \"Текст статьи\", \"pub_date\": \"Дата публикации\", \"group\": \"Название группы\", \"author\": \"Автор статьи\", }", "Используйте ' 'только латиницу, цифры, дефисы и знаки ' 'подчёркивания'), } for value,", "get_user_model from ..models import Post, Group User = get_user_model(); class PostModelTest(TestCase): @classmethod def", ") cls.group = Group.objects.create( title = \"Название тестовой группы\", slug = \"test-slug\", description", "test_verbose_name_post(self): \"\"\"Checks verbose names for post\"\"\" field_verboses = { \"title\": \"Название статьи\", \"text\":", "text for post\"\"\" field_help_text = { \"title\": \"Дайте название статье\", \"group\": \"Укажите группу", "группы\", ) def setUp(self): self.post = PostModelTest.post self.group = PostModelTest.group def test_verbose_name_post(self): \"\"\"Checks", "' 'подчёркивания'), } for value, expected in field_help_text.items(): with self.subTest(value=value): self.assertEqual(self.group._meta.get_field(value).help_text, expected) def", "\"description\": \"Описание группы\", } for value, expected in field_verboses.items(): with self.subTest(value=value): self.assertEqual(self.group._meta.get_field(value).verbose_name, expected)", "expected_str = self.post.text[:15] self.assertEqual(expected_str, str(self.post)) def test_str_group(self): \"\"\"Checks __str__ for group\"\"\" expected_str =", "text for group\"\"\" field_help_text = { \"title\": \"Дайте назание группе\", \"slug\": ('Укажите адрес", "expected in field_verboses.items(): with self.subTest(value=value): self.assertEqual(self.group._meta.get_field(value).verbose_name, expected) def test_help_text_post(self): \"\"\"Checks help text for", "description = \"Описание тестовой группы\", ) def setUp(self): self.post = PostModelTest.post self.group =", "группы\", \"slug\": \"Слаг\", \"description\": \"Описание группы\", } for value, expected in field_verboses.items(): with", "\"Описание группы\", } for value, expected in 
field_verboses.items(): with self.subTest(value=value): self.assertEqual(self.group._meta.get_field(value).verbose_name, expected) def", "\"\"\"Checks help text for group\"\"\" field_help_text = { \"title\": \"Дайте назание группе\", \"slug\":", "@classmethod def setUpClass(cls): super().setUpClass() cls.user = User.objects.create(username = \"Юзер\") cls.post = Post.objects.create( title=\"Заголовок", "} for value, expected in field_verboses.items(): with self.subTest(value=value): self.assertEqual(self.group._meta.get_field(value).verbose_name, expected) def test_help_text_post(self): \"\"\"Checks", "help text for post\"\"\" field_help_text = { \"title\": \"Дайте название статье\", \"group\": \"Укажите", "= { \"title\": \"Дайте название статье\", \"group\": \"Укажите группу для статьи\", } for", "field_help_text = { \"title\": \"Дайте назание группе\", \"slug\": ('Укажите адрес для группы. Используйте", "'только латиницу, цифры, дефисы и знаки ' 'подчёркивания'), } for value, expected in", "} for value, expected in field_help_text.items(): with self.subTest(value=value): self.assertEqual(self.group._meta.get_field(value).help_text, expected) def test_str_post(self): \"\"\"Checks", "field_verboses.items(): with self.subTest(value=value): self.assertEqual(self.post._meta.get_field(value).verbose_name, expected) def test_verbose_name_group(self): \"\"\"Checks verbose names for group\"\"\" field_verboses", "= { \"title\": \"Дайте назание группе\", \"slug\": ('Укажите адрес для группы. Используйте '", "get_user_model(); class PostModelTest(TestCase): @classmethod def setUpClass(cls): super().setUpClass() cls.user = User.objects.create(username = \"Юзер\") cls.post", "self.post = PostModelTest.post self.group = PostModelTest.group def test_verbose_name_post(self): \"\"\"Checks verbose names for post\"\"\"", "Post.objects.create( title=\"Заголовок тестовой статьи\", text=\"Текст тестовой статьи\", author = cls.user, ) cls.group =", "название статье\", \"group\": \"Укажите группу для статьи\", } for value, expected in field_help_text.items():", "группы\", } for value, expected in field_verboses.items(): with self.subTest(value=value): self.assertEqual(self.group._meta.get_field(value).verbose_name, expected) def test_help_text_post(self):", "cls.user, ) cls.group = Group.objects.create( title = \"Название тестовой группы\", slug = \"test-slug\",", "= PostModelTest.post self.group = PostModelTest.group def test_verbose_name_post(self): \"\"\"Checks verbose names for post\"\"\" field_verboses", "статьи\", author = cls.user, ) cls.group = Group.objects.create( title = \"Название тестовой группы\",", "cls.group = Group.objects.create( title = \"Название тестовой группы\", slug = \"test-slug\", description =", "группы\", slug = \"test-slug\", description = \"Описание тестовой группы\", ) def setUp(self): self.post", "verbose names for group\"\"\" field_verboses = { \"title\": \"Название группы\", \"slug\": \"Слаг\", \"description\":", "\"Описание тестовой группы\", ) def setUp(self): self.post = PostModelTest.post self.group = PostModelTest.group def", "cls.post = Post.objects.create( title=\"Заголовок тестовой статьи\", text=\"Текст тестовой статьи\", author = cls.user, )", "for group\"\"\" field_verboses = { \"title\": \"Название группы\", \"slug\": \"Слаг\", \"description\": \"Описание группы\",", "for value, expected in field_help_text.items(): with self.subTest(value=value): self.assertEqual(self.post._meta.get_field(value).help_text, expected) def test_help_text_group(self): \"\"\"Checks help", 
"{ \"title\": \"Дайте назание группе\", \"slug\": ('Укажите адрес для группы. Используйте ' 'только", "\"\"\"Checks verbose names for post\"\"\" field_verboses = { \"title\": \"Название статьи\", \"text\": \"Текст", "статьи\", } for value, expected in field_help_text.items(): with self.subTest(value=value): self.assertEqual(self.post._meta.get_field(value).help_text, expected) def test_help_text_group(self):", "help text for group\"\"\" field_help_text = { \"title\": \"Дайте назание группе\", \"slug\": ('Укажите", "PostModelTest.post self.group = PostModelTest.group def test_verbose_name_post(self): \"\"\"Checks verbose names for post\"\"\" field_verboses =", "test_help_text_group(self): \"\"\"Checks help text for group\"\"\" field_help_text = { \"title\": \"Дайте назание группе\",", "\"Название тестовой группы\", slug = \"test-slug\", description = \"Описание тестовой группы\", ) def", "for post\"\"\" field_verboses = { \"title\": \"Название статьи\", \"text\": \"Текст статьи\", \"pub_date\": \"Дата", "\"slug\": ('Укажите адрес для группы. Используйте ' 'только латиницу, цифры, дефисы и знаки", "тестовой статьи\", author = cls.user, ) cls.group = Group.objects.create( title = \"Название тестовой", "import Post, Group User = get_user_model(); class PostModelTest(TestCase): @classmethod def setUpClass(cls): super().setUpClass() cls.user", "group\"\"\" field_verboses = { \"title\": \"Название группы\", \"slug\": \"Слаг\", \"description\": \"Описание группы\", }", "names for post\"\"\" field_verboses = { \"title\": \"Название статьи\", \"text\": \"Текст статьи\", \"pub_date\":", "= PostModelTest.group def test_verbose_name_post(self): \"\"\"Checks verbose names for post\"\"\" field_verboses = { \"title\":", "value, expected in field_verboses.items(): with self.subTest(value=value): self.assertEqual(self.group._meta.get_field(value).verbose_name, expected) def test_help_text_post(self): \"\"\"Checks help text", "статье\", \"group\": \"Укажите группу для статьи\", } for value, expected in field_help_text.items(): with", "group\"\"\" field_help_text = { \"title\": \"Дайте назание группе\", \"slug\": ('Укажите адрес для группы.", "setUp(self): self.post = PostModelTest.post self.group = PostModelTest.group def test_verbose_name_post(self): \"\"\"Checks verbose names for", "\"Автор статьи\", } for value, expected in field_verboses.items(): with self.subTest(value=value): self.assertEqual(self.post._meta.get_field(value).verbose_name, expected) def", "text=\"Текст тестовой статьи\", author = cls.user, ) cls.group = Group.objects.create( title = \"Название", "post\"\"\" expected_str = self.post.text[:15] self.assertEqual(expected_str, str(self.post)) def test_str_group(self): \"\"\"Checks __str__ for group\"\"\" expected_str", "\"Юзер\") cls.post = Post.objects.create( title=\"Заголовок тестовой статьи\", text=\"Текст тестовой статьи\", author = cls.user,", "with self.subTest(value=value): self.assertEqual(self.post._meta.get_field(value).verbose_name, expected) def test_verbose_name_group(self): \"\"\"Checks verbose names for group\"\"\" field_verboses =", "self.assertEqual(expected_str, str(self.post)) def test_str_group(self): \"\"\"Checks __str__ for group\"\"\" expected_str = self.group.title self.assertEqual(expected_str, str(self.group))", "in field_verboses.items(): with self.subTest(value=value): self.assertEqual(self.post._meta.get_field(value).verbose_name, expected) def test_verbose_name_group(self): \"\"\"Checks verbose names for group\"\"\"", "\"\"\"Checks verbose names for group\"\"\" 
field_verboses = { \"title\": \"Название группы\", \"slug\": \"Слаг\",", "in field_help_text.items(): with self.subTest(value=value): self.assertEqual(self.group._meta.get_field(value).help_text, expected) def test_str_post(self): \"\"\"Checks __str__ for post\"\"\" expected_str", "with self.subTest(value=value): self.assertEqual(self.group._meta.get_field(value).verbose_name, expected) def test_help_text_post(self): \"\"\"Checks help text for post\"\"\" field_help_text =", "def test_help_text_group(self): \"\"\"Checks help text for group\"\"\" field_help_text = { \"title\": \"Дайте назание", "} for value, expected in field_help_text.items(): with self.subTest(value=value): self.assertEqual(self.post._meta.get_field(value).help_text, expected) def test_help_text_group(self): \"\"\"Checks", "field_help_text.items(): with self.subTest(value=value): self.assertEqual(self.group._meta.get_field(value).help_text, expected) def test_str_post(self): \"\"\"Checks __str__ for post\"\"\" expected_str =", "\"pub_date\": \"Дата публикации\", \"group\": \"Название группы\", \"author\": \"Автор статьи\", } for value, expected", "статьи\", } for value, expected in field_verboses.items(): with self.subTest(value=value): self.assertEqual(self.post._meta.get_field(value).verbose_name, expected) def test_verbose_name_group(self):", "адрес для группы. Используйте ' 'только латиницу, цифры, дефисы и знаки ' 'подчёркивания'),", "= \"test-slug\", description = \"Описание тестовой группы\", ) def setUp(self): self.post = PostModelTest.post", "\"\"\"Checks __str__ for post\"\"\" expected_str = self.post.text[:15] self.assertEqual(expected_str, str(self.post)) def test_str_group(self): \"\"\"Checks __str__", "\"Дайте название статье\", \"group\": \"Укажите группу для статьи\", } for value, expected in", "латиницу, цифры, дефисы и знаки ' 'подчёркивания'), } for value, expected in field_help_text.items():", "self.assertEqual(self.group._meta.get_field(value).help_text, expected) def test_str_post(self): \"\"\"Checks __str__ for post\"\"\" expected_str = self.post.text[:15] self.assertEqual(expected_str, str(self.post))", "('Укажите адрес для группы. Используйте ' 'только латиницу, цифры, дефисы и знаки '", "field_verboses = { \"title\": \"Название группы\", \"slug\": \"Слаг\", \"description\": \"Описание группы\", } for", "Group User = get_user_model(); class PostModelTest(TestCase): @classmethod def setUpClass(cls): super().setUpClass() cls.user = User.objects.create(username", "для группы. Используйте ' 'только латиницу, цифры, дефисы и знаки ' 'подчёркивания'), }" ]
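This record looks like a Django model test module (checks of verbose_name, help_text and __str__ on Post and Group models). A condensed sketch of the pattern is below; the Post/Group models, their fields, and the expected Russian labels are taken from the shingles, and the relative import means it only runs inside the original project.

```python
# Sketch only: condensed from the shingles above; assumes the project's own
# Post and Group models (imported relatively, as in the original test module).
from django.contrib.auth import get_user_model
from django.test import TestCase

from ..models import Group, Post

User = get_user_model()


class PostModelTest(TestCase):
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls.user = User.objects.create(username="user")
        cls.post = Post.objects.create(
            title="Test post title", text="Test post text", author=cls.user)
        cls.group = Group.objects.create(
            title="Test group", slug="test-slug", description="Test group")

    def test_verbose_name_post(self):
        """verbose_name of each Post field matches the expected label."""
        field_verboses = {"title": "Название статьи", "text": "Текст статьи"}
        for field, expected in field_verboses.items():
            with self.subTest(field=field):
                self.assertEqual(
                    self.post._meta.get_field(field).verbose_name, expected)

    def test_str_post(self):
        """__str__ of Post returns the first 15 characters of its text."""
        self.assertEqual(str(self.post), self.post.text[:15])
```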
[ "for i in range(0, Board.COLUMN_SIZE): sys.stdout.write(i.__str__()) sys.stdout.write(\" \") sys.stdout.write(\" \") for j in", "return True def evaluate(self, side): total = 0 for j in range(Board.ROW_SIZE): for", "False self.board[j][i] = Figures.Rook(side, cur_pos, was_moved) elif figure_type == FigureType.KNIGHT: self.board[j][i] = Figures.Knight(side,", "elif figure_type == FigureType.PAWN: was_moved = True if side == Side.WHITE: if i", "letter == 'n': self.board[j][i] = Figures.Knight(side, cur_pos) elif letter == 'q': self.board[j][i] =", "side = Side.BLACK letter = letter.lower() cur_pos = Vector2d(j, i) if letter ==", "if figure_type == FigureType.KING: latter = 'k' elif figure_type == FigureType.QUEEN: latter =", "== FigureType.PAWN: latter = 'p' if side == Side.WHITE: latter = latter.upper() export_board[i", "None: if self.board[j][i].side == side: figures.append(self.board[j][i]) return figures def make_move(self, move): self.get(move.point_from).make_move(self, move.point_to)", "in range(len(enemy_figures)): cur_figure = enemy_figures[i] available_moves = cur_figure.generate_moves(self) for j in range(len(available_moves)): new_chess_board", "range(0, Board.ROW_SIZE)] for i in range(0, Board.COLUMN_SIZE)] str_board = ['.' for j in", "figure.generate_moves(self, my_turn) else: attacked_cells = attacked_cells + figure.generate_moves(self) for k in range(len(attacked_cells)): summary_moves.append(Move(Vector2d(j,", "not None: if figure.side is side: sign = 1 else: sign = -1", "figure is not None: if figure.side is side: sign = 1 else: sign", "make_move(self, move): self.get(move.point_from).make_move(self, move.point_to) def summary_attacked_cells(self, side): attacked_cells = [] for j in", "figure_type == FigureType.BISHOP: self.board[j][i] = Figures.Bishop(side, cur_pos) elif figure_type == FigureType.PAWN: was_moved =", "self.board[j][i] is None: continue figure_type = self.board[j][i].figure_type side = self.board[j][i].side if figure_type ==", "copy.deepcopy(self) if new_chess_board.get(cur_figure.position) is None: print(cur_figure.position.x) print(cur_figure.position.y) new_chess_board.make_move(Move(cur_figure.position, available_moves[j])) if new_chess_board.is_that_check(my_side) is False:", "else: continue def serialize_to_str(self): str_board = ['.' 
import copy
import sys

import ChessAI.GameController.game_figures as Figures
from ChessBoard.chess_board import Board
from ChessBoard.chess_figure import FigureType, Side
from Vector2d.Vector2d import Vector2d, Move


class GameBoard:
    default_white_king_pos = Vector2d(4, 7)
    default_black_king_pos = Vector2d(4, 0)
    default_white_pawn_row = 6
    default_black_pawn_row = 1
    default_white_rook_right_pos = Vector2d(7, 7)
    default_white_rook_left_pos = Vector2d(0, 7)
    default_black_rook_right_pos = Vector2d(7, 0)
    default_black_rook_left_pos = Vector2d(0, 0)

    def __init__(self, chess_board):
        self.board = [[None for j in range(0, Board.ROW_SIZE)] for i in range(0, Board.COLUMN_SIZE)]
        for i in range(0, Board.COLUMN_SIZE):
            for j in range(0, Board.ROW_SIZE):
                if chess_board.board[j][i] is None:
                    continue
                figure_type = chess_board.board[j][i].figure_type
                side = chess_board.board[j][i].side
                cur_pos = Vector2d(j, i)
                if figure_type == FigureType.KING:
                    was_moved = True
                    if side == Side.WHITE:
                        if cur_pos == GameBoard.default_white_king_pos:
                            was_moved = False
                    elif side == Side.BLACK:
                        if cur_pos == GameBoard.default_black_king_pos:
                            was_moved = False
                    self.board[j][i] = Figures.King(side, cur_pos, was_moved)
                elif figure_type == FigureType.QUEEN:
                    self.board[j][i] = Figures.Queen(side, cur_pos)
                elif figure_type == FigureType.ROOK:
                    was_moved = True
                    if side == Side.WHITE:
                        if cur_pos == GameBoard.default_white_rook_left_pos or cur_pos == GameBoard.default_white_rook_right_pos:
                            was_moved = False
                    elif side == Side.BLACK:
                        if cur_pos == GameBoard.default_black_rook_left_pos or cur_pos == GameBoard.default_black_rook_right_pos:
                            was_moved = False
                    self.board[j][i] = Figures.Rook(side, cur_pos, was_moved)
                elif figure_type == FigureType.KNIGHT:
                    self.board[j][i] = Figures.Knight(side, cur_pos)
                elif figure_type == FigureType.BISHOP:
                    self.board[j][i] = Figures.Bishop(side, cur_pos)
                elif figure_type == FigureType.PAWN:
                    was_moved = True
                    if side == Side.WHITE:
                        if i == GameBoard.default_white_pawn_row:
                            was_moved = False
                    elif side == Side.BLACK:
                        if i == GameBoard.default_black_pawn_row:
                            was_moved = False
                    self.board[j][i] = Figures.Pawn(side, cur_pos, was_moved)
                else:
                    continue

    def serialize_to_str(self):
        str_board = ['.' for j in range(0, Board.ROW_SIZE) for i in range(0, Board.COLUMN_SIZE)]
        for i in range(0, Board.COLUMN_SIZE):
            for j in range(0, Board.ROW_SIZE):
                if self.board[j][i] is None:
                    continue
                str_board[i * Board.ROW_SIZE + j] = self.board[j][i].serialized_letter()
        res = ""
        for i in range(0, Board.COLUMN_SIZE * Board.ROW_SIZE):
            res += str_board[i]
        return res

    def deserialize_from_str(self, board_as_str):
        self.board = [[None for j in range(0, Board.ROW_SIZE)] for i in range(0, Board.COLUMN_SIZE)]
        str_board = ['.' for j in range(0, Board.ROW_SIZE) for i in range(0, Board.COLUMN_SIZE)]
        for i in range(0, Board.COLUMN_SIZE):
            for j in range(0, Board.ROW_SIZE):
                str_board[i * Board.ROW_SIZE + j] = str(board_as_str).__getitem__(i * Board.ROW_SIZE + j)
        for i in range(0, Board.COLUMN_SIZE):
            for j in range(0, Board.ROW_SIZE):
                letter = str_board[i * Board.ROW_SIZE + j]
                if letter.isupper():
                    side = Side.WHITE
                else:
                    side = Side.BLACK
                letter = letter.lower()
                cur_pos = Vector2d(j, i)
                if letter == 'k':
                    self.board[j][i] = Figures.King(side, cur_pos, False)
                elif letter == 'i':
                    self.board[j][i] = Figures.King(side, cur_pos, True)
                elif letter == 'b':
                    self.board[j][i] = Figures.Bishop(side, cur_pos)
                elif letter == 'r':
                    self.board[j][i] = Figures.Rook(side, cur_pos, False)
                elif letter == 'o':
                    self.board[j][i] = Figures.Rook(side, cur_pos, True)
                elif letter == 'n':
                    self.board[j][i] = Figures.Knight(side, cur_pos)
                elif letter == 'q':
                    self.board[j][i] = Figures.Queen(side, cur_pos)
                elif letter == 'p':
                    self.board[j][i] = Figures.Pawn(side, cur_pos)
                elif letter == 'a':
                    self.board[j][i] = Figures.Pawn(side, cur_pos, True)
                elif letter == 'w':
                    self.board[j][i] = Figures.Pawn(side, cur_pos, False, True)

    def export_chess_board(self):
        export_board = ['.' for j in range(0, Board.ROW_SIZE) for i in range(0, Board.COLUMN_SIZE)]
        for i in range(0, Board.COLUMN_SIZE):
            for j in range(0, Board.ROW_SIZE):
                if self.board[j][i] is None:
                    continue
                figure_type = self.board[j][i].figure_type
                side = self.board[j][i].side
                if figure_type == FigureType.KING:
                    latter = 'k'
                elif figure_type == FigureType.QUEEN:
                    latter = 'q'
                elif figure_type == FigureType.ROOK:
                    latter = 'r'
                elif figure_type == FigureType.KNIGHT:
                    latter = 'n'
                elif figure_type == FigureType.BISHOP:
                    latter = 'b'
                elif figure_type == FigureType.PAWN:
                    latter = 'p'
                if side == Side.WHITE:
                    latter = latter.upper()
                export_board[i * Board.ROW_SIZE + j] = latter
        return export_board

    def print(self):
        sys.stdout.write(" ")
        sys.stdout.write(" ")
        sys.stdout.write(" ")
        for i in range(0, Board.ROW_SIZE):
            sys.stdout.write(i.__str__())
            sys.stdout.write(" ")
        print()
        print()
        for i in range(0, Board.COLUMN_SIZE):
            sys.stdout.write(i.__str__())
            sys.stdout.write(" ")
            sys.stdout.write(" ")
            for j in range(0, Board.ROW_SIZE):
                if self.board[j][i] is not None:
                    self.board[j][i].print()
                    sys.stdout.write(" ")
                else:
                    sys.stdout.write("*")
                    sys.stdout.write(" ")
            print()

    def print_attacked_cells(self):
        for i in range(0, Board.COLUMN_SIZE):
            for j in range(0, Board.ROW_SIZE):
                if self.board[j][i] is not None:
                    attack_cells = self.board[j][i].generate_moves(self)
                    self.board[j][i].print()
                    sys.stdout.write(": ")
                    for k in range(len(attack_cells)):
                        sys.stdout.write(attack_cells[k].x.__str__())
                        sys.stdout.write(" ")
                        sys.stdout.write(attack_cells[k].y.__str__())
                        sys.stdout.write("; ")
                    print()

    def get_by_pos(self, x, y):
        return self.board[x][y]

    def get(self, position):
        return self.board[position.x][position.y]

    def set(self, position, game_object):
        self.board[position.x][position.y] = game_object

    def get_king_cell(self, side):
        for i in range(0, Board.COLUMN_SIZE):
            for j in range(0, Board.ROW_SIZE):
                if self.board[j][i] is not None:
                    if isinstance(self.board[j][i], Figures.King) and self.board[j][i].side == side:
                        return Vector2d(j, i)

    def get_figures_list(self, side):
        figures = []
        for i in range(0, Board.COLUMN_SIZE):
            for j in range(0, Board.ROW_SIZE):
                if self.board[j][i] is not None:
                    if self.board[j][i].side == side:
                        figures.append(self.board[j][i])
        return figures

    def make_move(self, move):
        self.get(move.point_from).make_move(self, move.point_to)

    def summary_attacked_cells(self, side):
        attacked_cells = []
        for j in range(Board.ROW_SIZE):
            for i in range(Board.COLUMN_SIZE):
                figure = self.get_by_pos(j, i)
                if figure is not None and figure.side == side:
                    if isinstance(figure, Figures.King):
                        attacked_cells = attacked_cells + figure.generate_moves(self, False)
                    elif isinstance(figure, Figures.Pawn):
                        attacked_cells = attacked_cells + figure.generate_moves(self, True)
                    else:
                        attacked_cells = attacked_cells + figure.generate_moves(self)
        return attacked_cells

    def summary_moves(self, side, my_turn=True):
        summary_moves = []
        attacked_cells = []
        for j in range(Board.ROW_SIZE):
            for i in range(Board.COLUMN_SIZE):
                attacked_cells.clear()
                figure = self.get_by_pos(j, i)
                if figure is not None and figure.side == side:
                    if isinstance(figure, Figures.King):
                        attacked_cells = attacked_cells + figure.generate_moves(self, my_turn)
                    else:
                        attacked_cells = attacked_cells + figure.generate_moves(self)
                    for k in range(len(attacked_cells)):
                        summary_moves.append(Move(Vector2d(j, i), attacked_cells[k]))
        return summary_moves

    def is_that_check(self, my_side):
        attacked_cells = self.summary_attacked_cells(my_side)
        enemy_king_cell = self.get_king_cell(Side.get_oposite(my_side))
        return enemy_king_cell in attacked_cells

    def is_that_mate(self, my_side):
        enemy_figures = self.get_figures_list(Side.get_oposite(my_side))
        for i in range(len(enemy_figures)):
            cur_figure = enemy_figures[i]
            available_moves = cur_figure.generate_moves(self)
            for j in range(len(available_moves)):
                new_chess_board = copy.deepcopy(self)
                if new_chess_board.get(cur_figure.position) is None:
                    print(cur_figure.position.x)
                    print(cur_figure.position.y)
                new_chess_board.make_move(Move(cur_figure.position, available_moves[j]))
                if new_chess_board.is_that_check(my_side) is False:
                    return False
        return True

    def is_that_stalemate(self, my_side):
        enemy_figures = self.get_figures_list(Side.get_oposite(my_side))
        for i in range(len(enemy_figures)):
            cur_figure = enemy_figures[i]
            if isinstance(cur_figure, Figures.King) is not True:
                available_moves = cur_figure.generate_moves(self)
                if len(available_moves) != 0:
                    return False
            else:
                available_moves = cur_figure.generate_moves(self)
                for j in range(len(available_moves)):
                    new_chess_board = copy.deepcopy(self)
                    if new_chess_board.get(cur_figure.position) is None:
                        print(cur_figure.position.x)
                        print(cur_figure.position.y)
                    new_chess_board.make_move(Move(cur_figure.position, available_moves[j]))
                    if new_chess_board.is_that_check(my_side) is False:
                        return False
        return True

    def evaluate(self, side):
        total = 0
        for j in range(Board.ROW_SIZE):
            for i in range(Board.COLUMN_SIZE):
                pos = Vector2d(j, i)
                figure = self.get(pos)
                if figure is not None:
                    if figure.side is side:
                        sign = 1
                    else:
                        sign = -1
                    total = total + (figure.evaluate(j, i) * sign)
        return total

    def delete_double_move(self, side_to_del):
        for j in range(Board.ROW_SIZE):
            for i in range(Board.COLUMN_SIZE):
                figure = self.get_by_pos(j, i)
                if figure is not None and figure.side == side_to_del:
                    if isinstance(figure, Figures.Pawn):
                        figure.double_move = False

    def swap_pawn(self, position, figure_lat):
        side = self.board[position.x][position.y].side
        lower = figure_lat.lower()
        if lower == 'q':
            self.board[position.x][position.y] = Figures.Queen(side, position)
        if lower == 'b':
            self.board[position.x][position.y] = Figures.Bishop(side, position)
        if lower == 'n':
            self.board[position.x][position.y] = Figures.Knight(side, position)
        if lower == 'r':
            self.board[position.x][position.y] = Figures.Rook(side, position, True)
range(0, Board.COLUMN_SIZE): for j in range(0,", "== Side.WHITE: if cur_pos == GameBoard.default_white_rook_left_pos or cur_pos == GameBoard.default_white_rook_right_pos: was_moved = False", "in range(0, Board.ROW_SIZE)] for i in range(0, Board.COLUMN_SIZE)] for i in range(0, Board.COLUMN_SIZE):", "* Board.ROW_SIZE + j] = latter return export_board def print(self): sys.stdout.write(\" \") sys.stdout.write(\"", "self.board = [[None for j in range(0, Board.ROW_SIZE)] for i in range(0, Board.COLUMN_SIZE)]", "is None: continue str_board[i * Board.ROW_SIZE + j] = self.board[j][i].serialized_letter() res = \"\"", "print() def get_by_pos(self, x, y): return self.board[x][y] def get(self, position): return self.board[position.x][position.y] def", "position, game_object): self.board[position.x][position.y] = game_object def get_king_cell(self, side): for i in range(0, Board.COLUMN_SIZE):", "== 'n': self.board[j][i] = Figures.Knight(side, cur_pos) elif letter == 'q': self.board[j][i] = Figures.Queen(side,", "for j in range(Board.ROW_SIZE): for i in range(Board.COLUMN_SIZE): attacked_cells.clear() figure = self.get_by_pos(j, i)", "'b': self.board[position.x][position.y] = Figures.Bishop(side, position) if lower == 'n': self.board[position.x][position.y] = Figures.Knight(side, position)", "j in range(0, Board.ROW_SIZE): if self.board[j][i] is None: continue figure_type = self.board[j][i].figure_type side", "GameBoard.default_black_pawn_row: was_moved = False self.board[j][i] = Figures.Pawn(side, cur_pos, was_moved) else: continue def serialize_to_str(self):", "self.board[j][i] = Figures.King(side, cur_pos, True) elif letter == 'b': self.board[j][i] = Figures.Bishop(side, cur_pos)", "False self.board[j][i] = Figures.King(side, cur_pos, was_moved) elif figure_type == FigureType.QUEEN: self.board[j][i] = Figures.Queen(side,", "= Figures.Bishop(side, cur_pos) elif figure_type == FigureType.PAWN: was_moved = True if side ==", "from Vector2d.Vector2d import Vector2d, Move class GameBoard: default_white_king_pos = Vector2d(4, 7) default_black_king_pos =", "range(Board.ROW_SIZE): for i in range(Board.COLUMN_SIZE): attacked_cells.clear() figure = self.get_by_pos(j, i) if figure is", "self.get_king_cell(Side.get_oposite(my_side)) return enemy_king_cell in attacked_cells def is_that_mate(self, my_side): enemy_figures = self.get_figures_list(Side.get_oposite(my_side)) for i", "enemy_figures[i] if isinstance(cur_figure, Figures.King) is not True: available_moves = cur_figure.generate_moves(self) if len(available_moves) !=", "import copy import sys import ChessAI.GameController.game_figures as Figures from ChessBoard.chess_board import Board from", "export_chess_board(self): export_board = ['.' 
for j in range(0, Board.ROW_SIZE) for i in range(0,", "Side.BLACK letter = letter.lower() cur_pos = Vector2d(j, i) if letter == 'k': self.board[j][i]", "= [] attacked_cells = [] for j in range(Board.ROW_SIZE): for i in range(Board.COLUMN_SIZE):", "if side == Side.WHITE: if cur_pos == GameBoard.default_white_rook_left_pos or cur_pos == GameBoard.default_white_rook_right_pos: was_moved", "cur_pos, False) elif letter == 'i': self.board[j][i] = Figures.King(side, cur_pos, True) elif letter", "def evaluate(self, side): total = 0 for j in range(Board.ROW_SIZE): for i in", "def summary_moves(self, side, my_turn=True): summary_moves = [] attacked_cells = [] for j in", "total + (figure.evaluate(j, i) * sign) return total def delete_double_move(self, side_to_del): for j", "j in range(0, Board.ROW_SIZE): if self.board[j][i] is not None: if isinstance(self.board[j][i], Figures.King) and", "Board.ROW_SIZE)] for i in range(0, Board.COLUMN_SIZE)] str_board = ['.' for j in range(0,", "== Side.BLACK: if i == GameBoard.default_black_pawn_row: was_moved = False self.board[j][i] = Figures.Pawn(side, cur_pos,", "Vector2d(j, i) def get_figures_list(self, side): figures = [] for i in range(0, Board.COLUMN_SIZE):", "Figures.Pawn(side, cur_pos, False, True) def export_chess_board(self): export_board = ['.' for j in range(0,", "None and figure.side == side_to_del: if isinstance(figure, Figures.Pawn): figure.double_move = False def swap_pawn(self,", "= enemy_figures[i] available_moves = cur_figure.generate_moves(self) for j in range(len(available_moves)): new_chess_board = copy.deepcopy(self) if", "= 6 default_black_pawn_row = 1 default_white_rook_right_pos = Vector2d(7, 7) default_white_rook_left_pos = Vector2d(0, 7)", "attacked_cells def is_that_mate(self, my_side): enemy_figures = self.get_figures_list(Side.get_oposite(my_side)) for i in range(len(enemy_figures)): cur_figure =", "= Figures.Bishop(side, cur_pos) elif letter == 'r': self.board[j][i] = Figures.Rook(side, cur_pos, False) elif", "import Vector2d, Move class GameBoard: default_white_king_pos = Vector2d(4, 7) default_black_king_pos = Vector2d(4, 0)", "elif side == Side.BLACK: if cur_pos == GameBoard.default_black_king_pos: was_moved = False self.board[j][i] =", "cur_pos, was_moved) elif figure_type == FigureType.KNIGHT: self.board[j][i] = Figures.Knight(side, cur_pos) elif figure_type ==", "range(len(enemy_figures)): cur_figure = enemy_figures[i] if isinstance(cur_figure, Figures.King) is not True: available_moves = cur_figure.generate_moves(self)", "['.' 
for j in range(0, Board.ROW_SIZE) for i in range(0, Board.COLUMN_SIZE)] for i", "None: self.board[j][i].print() sys.stdout.write(\" \") else: sys.stdout.write(\"*\") sys.stdout.write(\" \") print() def print_attacked_cells(self): for i", "ChessAI.GameController.game_figures as Figures from ChessBoard.chess_board import Board from ChessBoard.chess_figure import FigureType, Side from", "if cur_pos == GameBoard.default_white_king_pos: was_moved = False elif side == Side.BLACK: if cur_pos", "True) elif letter == 'n': self.board[j][i] = Figures.Knight(side, cur_pos) elif letter == 'q':", "sys.stdout.write(\" \") print() print() for i in range(0, Board.COLUMN_SIZE): sys.stdout.write(i.__str__()) sys.stdout.write(\" \") sys.stdout.write(\"", "import FigureType, Side from Vector2d.Vector2d import Vector2d, Move class GameBoard: default_white_king_pos = Vector2d(4,", "Vector2d(j, i) if letter == 'k': self.board[j][i] = Figures.King(side, cur_pos, False) elif letter", "side = chess_board.board[j][i].side cur_pos = Vector2d(j, i) if figure_type == FigureType.KING: was_moved =", "attacked_cells + figure.generate_moves(self, False) elif isinstance(figure, Figures.Pawn): attacked_cells = attacked_cells + figure.generate_moves(self, True)", "Vector2d(j, i) figure = self.get(pos) if figure is not None: if figure.side is", "print(cur_figure.position.y) new_chess_board.make_move(Move(cur_figure.position, available_moves[j])) if new_chess_board.is_that_check(my_side) is False: return False return True def is_that_stalemate(self,", "Side from Vector2d.Vector2d import Vector2d, Move class GameBoard: default_white_king_pos = Vector2d(4, 7) default_black_king_pos", "Board.ROW_SIZE)] for i in range(0, Board.COLUMN_SIZE)] for i in range(0, Board.COLUMN_SIZE): for j", "latter = latter.upper() export_board[i * Board.ROW_SIZE + j] = latter return export_board def", "= [[None for j in range(0, Board.ROW_SIZE)] for i in range(0, Board.COLUMN_SIZE)] for", "board_as_str): self.board = [[None for j in range(0, Board.ROW_SIZE)] for i in range(0,", "move): self.get(move.point_from).make_move(self, move.point_to) def summary_attacked_cells(self, side): attacked_cells = [] for j in range(Board.ROW_SIZE):", "Board.COLUMN_SIZE): sys.stdout.write(i.__str__()) sys.stdout.write(\" \") sys.stdout.write(\" \") for j in range(0, Board.ROW_SIZE): if self.board[j][i]", "Board.COLUMN_SIZE): for j in range(0, Board.ROW_SIZE): if self.board[j][i] is not None: attack_cells =", "\") for k in range(len(attack_cells)): sys.stdout.write(attack_cells[k].x.__str__()) sys.stdout.write(\" \") sys.stdout.write(attack_cells[k].y.__str__()) sys.stdout.write(\"; \") print() def", "False self.board[j][i] = Figures.Pawn(side, cur_pos, was_moved) else: continue def serialize_to_str(self): str_board = ['.'", "range(0, Board.ROW_SIZE): if self.board[j][i] is not None: attack_cells = self.board[j][i].generate_moves(self) self.board[j][i].print() sys.stdout.write(\": \")", "res += str_board[i] return res def deserialize_from_str(self, board_as_str): self.board = [[None for j", "if letter.isupper(): side = Side.WHITE else: side = Side.BLACK letter = letter.lower() cur_pos", "latter = 'p' if side == Side.WHITE: latter = latter.upper() export_board[i * Board.ROW_SIZE", "for i in range(0, Board.COLUMN_SIZE): for j in range(0, Board.ROW_SIZE): if chess_board.board[j][i] is", "return False return True def evaluate(self, side): total = 0 for j in", "self.board[j][i] = Figures.Queen(side, cur_pos) elif letter == 'p': self.board[j][i] = Figures.Pawn(side, cur_pos) 
elif", "Board.COLUMN_SIZE): for j in range(0, Board.ROW_SIZE): if self.board[j][i] is None: continue figure_type =", "letter == 'q': self.board[j][i] = Figures.Queen(side, cur_pos) elif letter == 'p': self.board[j][i] =", "6 default_black_pawn_row = 1 default_white_rook_right_pos = Vector2d(7, 7) default_white_rook_left_pos = Vector2d(0, 7) default_black_rook_right_pos", "side == Side.WHITE: if cur_pos == GameBoard.default_white_rook_left_pos or cur_pos == GameBoard.default_white_rook_right_pos: was_moved =", "\") print() def get_by_pos(self, x, y): return self.board[x][y] def get(self, position): return self.board[position.x][position.y]", "7) default_black_king_pos = Vector2d(4, 0) default_white_pawn_row = 6 default_black_pawn_row = 1 default_white_rook_right_pos =", "in range(0, Board.ROW_SIZE): if self.board[j][i] is not None: if isinstance(self.board[j][i], Figures.King) and self.board[j][i].side", "= self.get(pos) if figure is not None: if figure.side is side: sign =", "position) if lower == 'b': self.board[position.x][position.y] = Figures.Bishop(side, position) if lower == 'n':", "0: return False else: available_moves = cur_figure.generate_moves(self) for j in range(len(available_moves)): new_chess_board =", "return False return True def is_that_stalemate(self, my_side): enemy_figures = self.get_figures_list(Side.get_oposite(my_side)) for i in", "'b' elif figure_type == FigureType.PAWN: latter = 'p' if side == Side.WHITE: latter", "Figures.Rook(side, cur_pos, was_moved) elif figure_type == FigureType.KNIGHT: self.board[j][i] = Figures.Knight(side, cur_pos) elif figure_type", "[] for j in range(Board.ROW_SIZE): for i in range(Board.COLUMN_SIZE): attacked_cells.clear() figure = self.get_by_pos(j,", "i in range(0, Board.COLUMN_SIZE): sys.stdout.write(i.__str__()) sys.stdout.write(\" \") sys.stdout.write(\" \") for j in range(0,", "self.board[position.x][position.y] = Figures.Queen(side, position) if lower == 'b': self.board[position.x][position.y] = Figures.Bishop(side, position) if", "Figures.Pawn(side, cur_pos, was_moved) else: continue def serialize_to_str(self): str_board = ['.' for j in", "figure.side == side_to_del: if isinstance(figure, Figures.Pawn): figure.double_move = False def swap_pawn(self, position, figure_lat):", "as Figures from ChessBoard.chess_board import Board from ChessBoard.chess_figure import FigureType, Side from Vector2d.Vector2d", "cur_pos == GameBoard.default_black_rook_right_pos: was_moved = False self.board[j][i] = Figures.Rook(side, cur_pos, was_moved) elif figure_type", "False def swap_pawn(self, position, figure_lat): side = self.board[position.x][position.y].side lower = figure_lat.lower() if lower", "export_board = ['.' 
for j in range(0, Board.ROW_SIZE) for i in range(0, Board.COLUMN_SIZE)]", "else: available_moves = cur_figure.generate_moves(self) for j in range(len(available_moves)): new_chess_board = copy.deepcopy(self) if new_chess_board.get(cur_figure.position)", "False return True def evaluate(self, side): total = 0 for j in range(Board.ROW_SIZE):", "GameBoard.default_white_rook_left_pos or cur_pos == GameBoard.default_white_rook_right_pos: was_moved = False elif side == Side.BLACK: if", "range(0, Board.COLUMN_SIZE): for j in range(0, Board.ROW_SIZE): str_board[i * Board.ROW_SIZE + j] =", "range(0, Board.COLUMN_SIZE): for j in range(0, Board.ROW_SIZE): if self.board[j][i] is None: continue figure_type", "import Board from ChessBoard.chess_figure import FigureType, Side from Vector2d.Vector2d import Vector2d, Move class", "cur_pos == GameBoard.default_white_rook_right_pos: was_moved = False elif side == Side.BLACK: if cur_pos ==", "total def delete_double_move(self, side_to_del): for j in range(Board.ROW_SIZE): for i in range(Board.COLUMN_SIZE): figure", "was_moved = True if side == Side.WHITE: if cur_pos == GameBoard.default_white_king_pos: was_moved =", "if lower == 'q': self.board[position.x][position.y] = Figures.Queen(side, position) if lower == 'b': self.board[position.x][position.y]", "if self.board[j][i] is None: continue figure_type = self.board[j][i].figure_type side = self.board[j][i].side if figure_type", "or cur_pos == GameBoard.default_black_rook_right_pos: was_moved = False self.board[j][i] = Figures.Rook(side, cur_pos, was_moved) elif", "y): return self.board[x][y] def get(self, position): return self.board[position.x][position.y] def set(self, position, game_object): self.board[position.x][position.y]", "== 'b': self.board[position.x][position.y] = Figures.Bishop(side, position) if lower == 'n': self.board[position.x][position.y] = Figures.Knight(side,", "default_white_rook_left_pos = Vector2d(0, 7) default_black_rook_right_pos = Vector2d(7, 0) default_black_rook_left_pos = Vector2d(0, 0) def", "Vector2d(7, 7) default_white_rook_left_pos = Vector2d(0, 7) default_black_rook_right_pos = Vector2d(7, 0) default_black_rook_left_pos = Vector2d(0,", "None: print(cur_figure.position.x) print(cur_figure.position.y) new_chess_board.make_move(Move(cur_figure.position, available_moves[j])) if new_chess_board.is_that_check(my_side) is False: return False return True", "Board.COLUMN_SIZE): for j in range(0, Board.ROW_SIZE): str_board[i * Board.ROW_SIZE + j] = str(board_as_str).__getitem__(i", "'n': self.board[j][i] = Figures.Knight(side, cur_pos) elif letter == 'q': self.board[j][i] = Figures.Queen(side, cur_pos)", "== FigureType.KNIGHT: self.board[j][i] = Figures.Knight(side, cur_pos) elif figure_type == FigureType.BISHOP: self.board[j][i] = Figures.Bishop(side,", "self.board[j][i].print() sys.stdout.write(\" \") else: sys.stdout.write(\"*\") sys.stdout.write(\" \") print() def print_attacked_cells(self): for i in", "__init__(self, chess_board): self.board = [[None for j in range(0, Board.ROW_SIZE)] for i in", "sys.stdout.write(attack_cells[k].x.__str__()) sys.stdout.write(\" \") sys.stdout.write(attack_cells[k].y.__str__()) sys.stdout.write(\"; \") print() def get_by_pos(self, x, y): return self.board[x][y]", "letter = str_board[i * Board.ROW_SIZE + j] if letter.isupper(): side = Side.WHITE else:", "\"\" for i in range(0, Board.COLUMN_SIZE * Board.ROW_SIZE): res += str_board[i] return res", "set(self, position, game_object): self.board[position.x][position.y] = game_object def 
get_king_cell(self, side): for i in range(0,", "self.get(pos) if figure is not None: if figure.side is side: sign = 1", "FigureType.KING: was_moved = True if side == Side.WHITE: if cur_pos == GameBoard.default_white_king_pos: was_moved", "cur_pos) elif letter == 'p': self.board[j][i] = Figures.Pawn(side, cur_pos) elif letter == 'a':", "\") print() def print_attacked_cells(self): for i in range(0, Board.COLUMN_SIZE): for j in range(0,", "'q' elif figure_type == FigureType.ROOK: latter = 'r' elif figure_type == FigureType.KNIGHT: latter", "range(Board.ROW_SIZE): for i in range(Board.COLUMN_SIZE): figure = self.get_by_pos(j, i) if figure is not", "is None: continue figure_type = chess_board.board[j][i].figure_type side = chess_board.board[j][i].side cur_pos = Vector2d(j, i)", "attacked_cells = attacked_cells + figure.generate_moves(self, True) else: attacked_cells = attacked_cells + figure.generate_moves(self) return", "attacked_cells.clear() figure = self.get_by_pos(j, i) if figure is not None and figure.side ==", "my_side): attacked_cells = self.summary_attacked_cells(my_side) enemy_king_cell = self.get_king_cell(Side.get_oposite(my_side)) return enemy_king_cell in attacked_cells def is_that_mate(self,", "= Side.WHITE else: side = Side.BLACK letter = letter.lower() cur_pos = Vector2d(j, i)", "in range(len(available_moves)): new_chess_board = copy.deepcopy(self) if new_chess_board.get(cur_figure.position) is None: print(cur_figure.position.x) print(cur_figure.position.y) new_chess_board.make_move(Move(cur_figure.position, available_moves[j]))", "self.board[j][i] = Figures.Knight(side, cur_pos) elif letter == 'q': self.board[j][i] = Figures.Queen(side, cur_pos) elif", "cur_figure = enemy_figures[i] if isinstance(cur_figure, Figures.King) is not True: available_moves = cur_figure.generate_moves(self) if", "x, y): return self.board[x][y] def get(self, position): return self.board[position.x][position.y] def set(self, position, game_object):", "Vector2d(0, 7) default_black_rook_right_pos = Vector2d(7, 0) default_black_rook_left_pos = Vector2d(0, 0) def __init__(self, chess_board):", "return Vector2d(j, i) def get_figures_list(self, side): figures = [] for i in range(0,", "== FigureType.PAWN: was_moved = True if side == Side.WHITE: if i == GameBoard.default_white_pawn_row:", "default_white_rook_right_pos = Vector2d(7, 7) default_white_rook_left_pos = Vector2d(0, 7) default_black_rook_right_pos = Vector2d(7, 0) default_black_rook_left_pos", "elif letter == 'o': self.board[j][i] = Figures.Rook(side, cur_pos, True) elif letter == 'n':", "cur_pos == GameBoard.default_white_rook_left_pos or cur_pos == GameBoard.default_white_rook_right_pos: was_moved = False elif side ==", "import sys import ChessAI.GameController.game_figures as Figures from ChessBoard.chess_board import Board from ChessBoard.chess_figure import", "latter = 'q' elif figure_type == FigureType.ROOK: latter = 'r' elif figure_type ==", "figure.side == side: if isinstance(figure, Figures.King): attacked_cells = attacked_cells + figure.generate_moves(self, False) elif", "if figure.side is side: sign = 1 else: sign = -1 total =", "serialize_to_str(self): str_board = ['.' 
for j in range(0, Board.ROW_SIZE) for i in range(0,", "in range(0, Board.ROW_SIZE): if self.board[j][i] is not None: self.board[j][i].print() sys.stdout.write(\" \") else: sys.stdout.write(\"*\")", "attacked_cells def summary_moves(self, side, my_turn=True): summary_moves = [] attacked_cells = [] for j", "my_side): enemy_figures = self.get_figures_list(Side.get_oposite(my_side)) for i in range(len(enemy_figures)): cur_figure = enemy_figures[i] available_moves =", "if side == Side.WHITE: if cur_pos == GameBoard.default_white_king_pos: was_moved = False elif side", "j in range(len(available_moves)): new_chess_board = copy.deepcopy(self) if new_chess_board.get(cur_figure.position) is None: print(cur_figure.position.x) print(cur_figure.position.y) new_chess_board.make_move(Move(cur_figure.position,", "range(0, Board.ROW_SIZE)] for i in range(0, Board.COLUMN_SIZE)] for i in range(0, Board.COLUMN_SIZE): for", "Board.ROW_SIZE + j] = str(board_as_str).__getitem__(i * Board.ROW_SIZE + j) for i in range(0,", "def is_that_mate(self, my_side): enemy_figures = self.get_figures_list(Side.get_oposite(my_side)) for i in range(len(enemy_figures)): cur_figure = enemy_figures[i]", "elif letter == 'b': self.board[j][i] = Figures.Bishop(side, cur_pos) elif letter == 'r': self.board[j][i]", "position, figure_lat): side = self.board[position.x][position.y].side lower = figure_lat.lower() if lower == 'q': self.board[position.x][position.y]", "j in range(0, Board.ROW_SIZE)] for i in range(0, Board.COLUMN_SIZE)] str_board = ['.' for", "= False elif side == Side.BLACK: if i == GameBoard.default_black_pawn_row: was_moved = False", "Figures.Queen(side, cur_pos) elif letter == 'p': self.board[j][i] = Figures.Pawn(side, cur_pos) elif letter ==", "Side.WHITE else: side = Side.BLACK letter = letter.lower() cur_pos = Vector2d(j, i) if", "\") sys.stdout.write(attack_cells[k].y.__str__()) sys.stdout.write(\"; \") print() def get_by_pos(self, x, y): return self.board[x][y] def get(self,", "attacked_cells + figure.generate_moves(self, True) else: attacked_cells = attacked_cells + figure.generate_moves(self) return attacked_cells def", "[] for i in range(0, Board.COLUMN_SIZE): for j in range(0, Board.ROW_SIZE): if self.board[j][i]", "latter.upper() export_board[i * Board.ROW_SIZE + j] = latter return export_board def print(self): sys.stdout.write(\"", "= self.get_by_pos(j, i) if figure is not None and figure.side == side: if", "attacked_cells = self.summary_attacked_cells(my_side) enemy_king_cell = self.get_king_cell(Side.get_oposite(my_side)) return enemy_king_cell in attacked_cells def is_that_mate(self, my_side):", "= False self.board[j][i] = Figures.Rook(side, cur_pos, was_moved) elif figure_type == FigureType.KNIGHT: self.board[j][i] =", "return True def is_that_stalemate(self, my_side): enemy_figures = self.get_figures_list(Side.get_oposite(my_side)) for i in range(len(enemy_figures)): cur_figure", "for j in range(0, Board.ROW_SIZE) for i in range(0, Board.COLUMN_SIZE)] for i in", "FigureType.PAWN: latter = 'p' if side == Side.WHITE: latter = latter.upper() export_board[i *", "figure.generate_moves(self) return attacked_cells def summary_moves(self, side, my_turn=True): summary_moves = [] attacked_cells = []", "in range(0, Board.ROW_SIZE): letter = str_board[i * Board.ROW_SIZE + j] if letter.isupper(): side", "was_moved = False elif side == Side.BLACK: if cur_pos == GameBoard.default_black_king_pos: was_moved =", "figure_type == FigureType.KING: latter = 'k' elif figure_type == FigureType.QUEEN: latter = 'q'", "sign = -1 
total = total + (figure.evaluate(j, i) * sign) return total", "= True if side == Side.WHITE: if i == GameBoard.default_white_pawn_row: was_moved = False", "= Vector2d(7, 7) default_white_rook_left_pos = Vector2d(0, 7) default_black_rook_right_pos = Vector2d(7, 0) default_black_rook_left_pos =", "+ j] = self.board[j][i].serialized_letter() res = \"\" for i in range(0, Board.COLUMN_SIZE *", "for j in range(0, Board.ROW_SIZE): if chess_board.board[j][i] is None: continue figure_type = chess_board.board[j][i].figure_type", "print() for i in range(0, Board.COLUMN_SIZE): sys.stdout.write(i.__str__()) sys.stdout.write(\" \") sys.stdout.write(\" \") for j", "in range(0, Board.ROW_SIZE): sys.stdout.write(i.__str__()) sys.stdout.write(\" \") print() print() for i in range(0, Board.COLUMN_SIZE):", "k in range(len(attacked_cells)): summary_moves.append(Move(Vector2d(j, i), attacked_cells[k])) return summary_moves def is_that_check(self, my_side): attacked_cells =", "figure_type = self.board[j][i].figure_type side = self.board[j][i].side if figure_type == FigureType.KING: latter = 'k'", "= Figures.Pawn(side, cur_pos, True) elif letter == 'w': self.board[j][i] = Figures.Pawn(side, cur_pos, False,", "== Side.BLACK: if cur_pos == GameBoard.default_black_rook_left_pos or cur_pos == GameBoard.default_black_rook_right_pos: was_moved = False", "available_moves[j])) if new_chess_board.is_that_check(my_side) is False: return False return True def evaluate(self, side): total", "available_moves[j])) if new_chess_board.is_that_check(my_side) is False: return False return True def is_that_stalemate(self, my_side): enemy_figures", "figure.generate_moves(self, False) elif isinstance(figure, Figures.Pawn): attacked_cells = attacked_cells + figure.generate_moves(self, True) else: attacked_cells", "== GameBoard.default_white_king_pos: was_moved = False elif side == Side.BLACK: if cur_pos == GameBoard.default_black_king_pos:", "== GameBoard.default_white_pawn_row: was_moved = False elif side == Side.BLACK: if i == GameBoard.default_black_pawn_row:", "sys.stdout.write(\"; \") print() def get_by_pos(self, x, y): return self.board[x][y] def get(self, position): return", "not None and figure.side == side: if isinstance(figure, Figures.King): attacked_cells = attacked_cells +", "self.get_by_pos(j, i) if figure is not None and figure.side == side_to_del: if isinstance(figure,", "latter = 'b' elif figure_type == FigureType.PAWN: latter = 'p' if side ==", "= False elif side == Side.BLACK: if cur_pos == GameBoard.default_black_rook_left_pos or cur_pos ==", "is_that_stalemate(self, my_side): enemy_figures = self.get_figures_list(Side.get_oposite(my_side)) for i in range(len(enemy_figures)): cur_figure = enemy_figures[i] if", "Vector2d(0, 0) def __init__(self, chess_board): self.board = [[None for j in range(0, Board.ROW_SIZE)]", "self.board[j][i] = Figures.Bishop(side, cur_pos) elif letter == 'r': self.board[j][i] = Figures.Rook(side, cur_pos, False)", "ChessBoard.chess_board import Board from ChessBoard.chess_figure import FigureType, Side from Vector2d.Vector2d import Vector2d, Move", "was_moved = False self.board[j][i] = Figures.Rook(side, cur_pos, was_moved) elif figure_type == FigureType.KNIGHT: self.board[j][i]", "range(0, Board.ROW_SIZE): if self.board[j][i] is not None: self.board[j][i].print() sys.stdout.write(\" \") else: sys.stdout.write(\"*\") sys.stdout.write(\"", "attacked_cells = [] for j in range(Board.ROW_SIZE): for i in range(Board.COLUMN_SIZE): attacked_cells.clear() figure", "get_by_pos(self, x, y): return 
self.board[x][y] def get(self, position): return self.board[position.x][position.y] def set(self, position,", "isinstance(self.board[j][i], Figures.King) and self.board[j][i].side == side: return Vector2d(j, i) def get_figures_list(self, side): figures", "new_chess_board.make_move(Move(cur_figure.position, available_moves[j])) if new_chess_board.is_that_check(my_side) is False: return False return True def is_that_stalemate(self, my_side):", "def get_figures_list(self, side): figures = [] for i in range(0, Board.COLUMN_SIZE): for j", "'p': self.board[j][i] = Figures.Pawn(side, cur_pos) elif letter == 'a': self.board[j][i] = Figures.Pawn(side, cur_pos,", "GameBoard: default_white_king_pos = Vector2d(4, 7) default_black_king_pos = Vector2d(4, 0) default_white_pawn_row = 6 default_black_pawn_row", "cur_pos = Vector2d(j, i) if letter == 'k': self.board[j][i] = Figures.King(side, cur_pos, False)", "def delete_double_move(self, side_to_del): for j in range(Board.ROW_SIZE): for i in range(Board.COLUMN_SIZE): figure =", "cur_pos, True) elif letter == 'n': self.board[j][i] = Figures.Knight(side, cur_pos) elif letter ==", "elif letter == 'p': self.board[j][i] = Figures.Pawn(side, cur_pos) elif letter == 'a': self.board[j][i]", "default_black_pawn_row = 1 default_white_rook_right_pos = Vector2d(7, 7) default_white_rook_left_pos = Vector2d(0, 7) default_black_rook_right_pos =", "Vector2d(7, 0) default_black_rook_left_pos = Vector2d(0, 0) def __init__(self, chess_board): self.board = [[None for", "= copy.deepcopy(self) if new_chess_board.get(cur_figure.position) is None: print(cur_figure.position.x) print(cur_figure.position.y) new_chess_board.make_move(Move(cur_figure.position, available_moves[j])) if new_chess_board.is_that_check(my_side) is", "range(0, Board.COLUMN_SIZE)] str_board = ['.' 
for j in range(0, Board.ROW_SIZE) for i in", "summary_moves = [] attacked_cells = [] for j in range(Board.ROW_SIZE): for i in", "False) elif isinstance(figure, Figures.Pawn): attacked_cells = attacked_cells + figure.generate_moves(self, True) else: attacked_cells =", "sys.stdout.write(\"*\") sys.stdout.write(\" \") print() def print_attacked_cells(self): for i in range(0, Board.COLUMN_SIZE): for j", "lower == 'b': self.board[position.x][position.y] = Figures.Bishop(side, position) if lower == 'n': self.board[position.x][position.y] =", "range(Board.COLUMN_SIZE): pos = Vector2d(j, i) figure = self.get(pos) if figure is not None:", "= cur_figure.generate_moves(self) if len(available_moves) != 0: return False else: available_moves = cur_figure.generate_moves(self) for", "self.board[x][y] def get(self, position): return self.board[position.x][position.y] def set(self, position, game_object): self.board[position.x][position.y] = game_object", "figure_type == FigureType.ROOK: was_moved = True if side == Side.WHITE: if cur_pos ==", "Board.COLUMN_SIZE): for j in range(0, Board.ROW_SIZE): if self.board[j][i] is not None: if isinstance(self.board[j][i],", "elif letter == 'r': self.board[j][i] = Figures.Rook(side, cur_pos, False) elif letter == 'o':", "side == Side.WHITE: if cur_pos == GameBoard.default_white_king_pos: was_moved = False elif side ==", "range(0, Board.ROW_SIZE): sys.stdout.write(i.__str__()) sys.stdout.write(\" \") print() print() for i in range(0, Board.COLUMN_SIZE): sys.stdout.write(i.__str__())", "Board.ROW_SIZE): str_board[i * Board.ROW_SIZE + j] = str(board_as_str).__getitem__(i * Board.ROW_SIZE + j) for", "in range(0, Board.ROW_SIZE): if self.board[j][i] is not None: attack_cells = self.board[j][i].generate_moves(self) self.board[j][i].print() sys.stdout.write(\":", "range(0, Board.COLUMN_SIZE): for j in range(0, Board.ROW_SIZE): if self.board[j][i] is not None: if", "True def is_that_stalemate(self, my_side): enemy_figures = self.get_figures_list(Side.get_oposite(my_side)) for i in range(len(enemy_figures)): cur_figure =", "GameBoard.default_white_rook_right_pos: was_moved = False elif side == Side.BLACK: if cur_pos == GameBoard.default_black_rook_left_pos or", "self.board[j][i] is None: continue str_board[i * Board.ROW_SIZE + j] = self.board[j][i].serialized_letter() res =", "chess_board.board[j][i].figure_type side = chess_board.board[j][i].side cur_pos = Vector2d(j, i) if figure_type == FigureType.KING: was_moved", "7) default_black_rook_right_pos = Vector2d(7, 0) default_black_rook_left_pos = Vector2d(0, 0) def __init__(self, chess_board): self.board", "cur_pos) elif figure_type == FigureType.ROOK: was_moved = True if side == Side.WHITE: if", "default_white_pawn_row = 6 default_black_pawn_row = 1 default_white_rook_right_pos = Vector2d(7, 7) default_white_rook_left_pos = Vector2d(0,", "i in range(0, Board.COLUMN_SIZE)] for i in range(0, Board.COLUMN_SIZE): for j in range(0,", "FigureType.PAWN: was_moved = True if side == Side.WHITE: if i == GameBoard.default_white_pawn_row: was_moved", "* Board.ROW_SIZE + j) for i in range(0, Board.COLUMN_SIZE): for j in range(0,", "j in range(0, Board.ROW_SIZE): if self.board[j][i] is not None: self.board[j][i].print() sys.stdout.write(\" \") else:", "not None: attack_cells = self.board[j][i].generate_moves(self) self.board[j][i].print() sys.stdout.write(\": \") for k in range(len(attack_cells)): sys.stdout.write(attack_cells[k].x.__str__())", "if self.board[j][i] is not None: if isinstance(self.board[j][i], Figures.King) and 
self.board[j][i].side == side: return", "for j in range(len(available_moves)): new_chess_board = copy.deepcopy(self) if new_chess_board.get(cur_figure.position) is None: print(cur_figure.position.x) print(cur_figure.position.y)", "return export_board def print(self): sys.stdout.write(\" \") sys.stdout.write(\" \") sys.stdout.write(\" \") for i in", "[] attacked_cells = [] for j in range(Board.ROW_SIZE): for i in range(Board.COLUMN_SIZE): attacked_cells.clear()", "+ figure.generate_moves(self) for k in range(len(attacked_cells)): summary_moves.append(Move(Vector2d(j, i), attacked_cells[k])) return summary_moves def is_that_check(self,", "cur_pos) elif figure_type == FigureType.BISHOP: self.board[j][i] = Figures.Bishop(side, cur_pos) elif figure_type == FigureType.PAWN:", "available_moves = cur_figure.generate_moves(self) for j in range(len(available_moves)): new_chess_board = copy.deepcopy(self) if new_chess_board.get(cur_figure.position) is", "enemy_figures[i] available_moves = cur_figure.generate_moves(self) for j in range(len(available_moves)): new_chess_board = copy.deepcopy(self) if new_chess_board.get(cur_figure.position)", "range(len(available_moves)): new_chess_board = copy.deepcopy(self) if new_chess_board.get(cur_figure.position) is None: print(cur_figure.position.x) print(cur_figure.position.y) new_chess_board.make_move(Move(cur_figure.position, available_moves[j])) if", "True) def export_chess_board(self): export_board = ['.' for j in range(0, Board.ROW_SIZE) for i", "get_king_cell(self, side): for i in range(0, Board.COLUMN_SIZE): for j in range(0, Board.ROW_SIZE): if", "GameBoard.default_black_king_pos: was_moved = False self.board[j][i] = Figures.King(side, cur_pos, was_moved) elif figure_type == FigureType.QUEEN:", "in range(0, Board.ROW_SIZE): if self.board[j][i] is None: continue figure_type = self.board[j][i].figure_type side =", "def is_that_stalemate(self, my_side): enemy_figures = self.get_figures_list(Side.get_oposite(my_side)) for i in range(len(enemy_figures)): cur_figure = enemy_figures[i]", "figure_type == FigureType.BISHOP: latter = 'b' elif figure_type == FigureType.PAWN: latter = 'p'", "latter = 'k' elif figure_type == FigureType.QUEEN: latter = 'q' elif figure_type ==", "Figures.Pawn(side, cur_pos, True) elif letter == 'w': self.board[j][i] = Figures.Pawn(side, cur_pos, False, True)", "self.board[j][i] = Figures.Rook(side, cur_pos, was_moved) elif figure_type == FigureType.KNIGHT: self.board[j][i] = Figures.Knight(side, cur_pos)", "chess_board.board[j][i].side cur_pos = Vector2d(j, i) if figure_type == FigureType.KING: was_moved = True if", "elif figure_type == FigureType.BISHOP: latter = 'b' elif figure_type == FigureType.PAWN: latter =", "side: sign = 1 else: sign = -1 total = total + (figure.evaluate(j,", "str_board[i * Board.ROW_SIZE + j] = str(board_as_str).__getitem__(i * Board.ROW_SIZE + j) for i", "if cur_pos == GameBoard.default_white_rook_left_pos or cur_pos == GameBoard.default_white_rook_right_pos: was_moved = False elif side", "print() print() for i in range(0, Board.COLUMN_SIZE): sys.stdout.write(i.__str__()) sys.stdout.write(\" \") sys.stdout.write(\" \") for", "if cur_pos == GameBoard.default_black_rook_left_pos or cur_pos == GameBoard.default_black_rook_right_pos: was_moved = False self.board[j][i] =", "elif figure_type == FigureType.QUEEN: self.board[j][i] = Figures.Queen(side, cur_pos) elif figure_type == FigureType.ROOK: was_moved", "else: sign = -1 total = total + (figure.evaluate(j, i) * sign) return", "figure.side == side: if 
isinstance(figure, Figures.King): attacked_cells = attacked_cells + figure.generate_moves(self, my_turn) else:", "figure.side is side: sign = 1 else: sign = -1 total = total", "Board.ROW_SIZE + j] = latter return export_board def print(self): sys.stdout.write(\" \") sys.stdout.write(\" \")", "range(0, Board.ROW_SIZE): if self.board[j][i] is not None: if self.board[j][i].side == side: figures.append(self.board[j][i]) return", "None: if isinstance(self.board[j][i], Figures.King) and self.board[j][i].side == side: return Vector2d(j, i) def get_figures_list(self,", "continue def serialize_to_str(self): str_board = ['.' for j in range(0, Board.ROW_SIZE) for i", "\") sys.stdout.write(\" \") sys.stdout.write(\" \") for i in range(0, Board.ROW_SIZE): sys.stdout.write(i.__str__()) sys.stdout.write(\" \")", "k in range(len(attack_cells)): sys.stdout.write(attack_cells[k].x.__str__()) sys.stdout.write(\" \") sys.stdout.write(attack_cells[k].y.__str__()) sys.stdout.write(\"; \") print() def get_by_pos(self, x,", "for j in range(0, Board.ROW_SIZE): if self.board[j][i] is None: continue str_board[i * Board.ROW_SIZE", "FigureType.KNIGHT: latter = 'n' elif figure_type == FigureType.BISHOP: latter = 'b' elif figure_type", "return figures def make_move(self, move): self.get(move.point_from).make_move(self, move.point_to) def summary_attacked_cells(self, side): attacked_cells = []", "False: return False return True def is_that_stalemate(self, my_side): enemy_figures = self.get_figures_list(Side.get_oposite(my_side)) for i", "range(0, Board.ROW_SIZE): if self.board[j][i] is None: continue figure_type = self.board[j][i].figure_type side = self.board[j][i].side", "letter == 'p': self.board[j][i] = Figures.Pawn(side, cur_pos) elif letter == 'a': self.board[j][i] =", "attacked_cells[k])) return summary_moves def is_that_check(self, my_side): attacked_cells = self.summary_attacked_cells(my_side) enemy_king_cell = self.get_king_cell(Side.get_oposite(my_side)) return", "== 'r': self.board[j][i] = Figures.Rook(side, cur_pos, False) elif letter == 'o': self.board[j][i] =", "self.board[j][i].serialized_letter() res = \"\" for i in range(0, Board.COLUMN_SIZE * Board.ROW_SIZE): res +=", "= 'p' if side == Side.WHITE: latter = latter.upper() export_board[i * Board.ROW_SIZE +", "for i in range(0, Board.COLUMN_SIZE)] str_board = ['.' for j in range(0, Board.ROW_SIZE)", "sys.stdout.write(\" \") sys.stdout.write(\" \") sys.stdout.write(\" \") for i in range(0, Board.ROW_SIZE): sys.stdout.write(i.__str__()) sys.stdout.write(\"", "!= 0: return False else: available_moves = cur_figure.generate_moves(self) for j in range(len(available_moves)): new_chess_board", "Board.COLUMN_SIZE): for j in range(0, Board.ROW_SIZE): if self.board[j][i] is not None: if self.board[j][i].side", "Figures.Pawn): figure.double_move = False def swap_pawn(self, position, figure_lat): side = self.board[position.x][position.y].side lower =", "cur_pos, was_moved) else: continue def serialize_to_str(self): str_board = ['.' 
for j in range(0,", "False elif side == Side.BLACK: if cur_pos == GameBoard.default_black_king_pos: was_moved = False self.board[j][i]", "for i in range(Board.COLUMN_SIZE): figure = self.get_by_pos(j, i) if figure is not None", "isinstance(figure, Figures.Pawn): attacked_cells = attacked_cells + figure.generate_moves(self, True) else: attacked_cells = attacked_cells +", "range(0, Board.COLUMN_SIZE): for j in range(0, Board.ROW_SIZE): if chess_board.board[j][i] is None: continue figure_type", "str_board[i * Board.ROW_SIZE + j] = self.board[j][i].serialized_letter() res = \"\" for i in", "Figures.King): attacked_cells = attacked_cells + figure.generate_moves(self, False) elif isinstance(figure, Figures.Pawn): attacked_cells = attacked_cells", "j) for i in range(0, Board.COLUMN_SIZE): for j in range(0, Board.ROW_SIZE): letter =", "j in range(Board.ROW_SIZE): for i in range(Board.COLUMN_SIZE): pos = Vector2d(j, i) figure =", "== FigureType.KING: was_moved = True if side == Side.WHITE: if cur_pos == GameBoard.default_white_king_pos:", "str_board = ['.' for j in range(0, Board.ROW_SIZE) for i in range(0, Board.COLUMN_SIZE)]", "= False self.board[j][i] = Figures.Pawn(side, cur_pos, was_moved) else: continue def serialize_to_str(self): str_board =", "in range(0, Board.COLUMN_SIZE): sys.stdout.write(i.__str__()) sys.stdout.write(\" \") sys.stdout.write(\" \") for j in range(0, Board.ROW_SIZE):", "in range(Board.ROW_SIZE): for i in range(Board.COLUMN_SIZE): attacked_cells.clear() figure = self.get_by_pos(j, i) if figure", "+ j] = str(board_as_str).__getitem__(i * Board.ROW_SIZE + j) for i in range(0, Board.COLUMN_SIZE):", "def get(self, position): return self.board[position.x][position.y] def set(self, position, game_object): self.board[position.x][position.y] = game_object def", "continue figure_type = self.board[j][i].figure_type side = self.board[j][i].side if figure_type == FigureType.KING: latter =", "new_chess_board.make_move(Move(cur_figure.position, available_moves[j])) if new_chess_board.is_that_check(my_side) is False: return False return True def evaluate(self, side):", "attacked_cells = attacked_cells + figure.generate_moves(self) for k in range(len(attacked_cells)): summary_moves.append(Move(Vector2d(j, i), attacked_cells[k])) return", "= ['.' 
for j in range(0, Board.ROW_SIZE) for i in range(0, Board.COLUMN_SIZE)] for", "= [] for i in range(0, Board.COLUMN_SIZE): for j in range(0, Board.ROW_SIZE): if", "i in range(0, Board.COLUMN_SIZE): for j in range(0, Board.ROW_SIZE): if chess_board.board[j][i] is None:", "side: if isinstance(figure, Figures.King): attacked_cells = attacked_cells + figure.generate_moves(self, False) elif isinstance(figure, Figures.Pawn):", "= Vector2d(4, 7) default_black_king_pos = Vector2d(4, 0) default_white_pawn_row = 6 default_black_pawn_row = 1", "Figures.Bishop(side, cur_pos) elif figure_type == FigureType.PAWN: was_moved = True if side == Side.WHITE:", "if lower == 'b': self.board[position.x][position.y] = Figures.Bishop(side, position) if lower == 'n': self.board[position.x][position.y]", "export_board def print(self): sys.stdout.write(\" \") sys.stdout.write(\" \") sys.stdout.write(\" \") for i in range(0,", "* Board.ROW_SIZE + j] = self.board[j][i].serialized_letter() res = \"\" for i in range(0,", "def __init__(self, chess_board): self.board = [[None for j in range(0, Board.ROW_SIZE)] for i", "total = 0 for j in range(Board.ROW_SIZE): for i in range(Board.COLUMN_SIZE): pos =", "figure.double_move = False def swap_pawn(self, position, figure_lat): side = self.board[position.x][position.y].side lower = figure_lat.lower()", "is not None: self.board[j][i].print() sys.stdout.write(\" \") else: sys.stdout.write(\"*\") sys.stdout.write(\" \") print() def print_attacked_cells(self):", "cur_figure.generate_moves(self) if len(available_moves) != 0: return False else: available_moves = cur_figure.generate_moves(self) for j", "if isinstance(cur_figure, Figures.King) is not True: available_moves = cur_figure.generate_moves(self) if len(available_moves) != 0:", "self.board[j][i] = Figures.Rook(side, cur_pos, True) elif letter == 'n': self.board[j][i] = Figures.Knight(side, cur_pos)", "Board.ROW_SIZE): sys.stdout.write(i.__str__()) sys.stdout.write(\" \") print() print() for i in range(0, Board.COLUMN_SIZE): sys.stdout.write(i.__str__()) sys.stdout.write(\"", "== 'q': self.board[j][i] = Figures.Queen(side, cur_pos) elif letter == 'p': self.board[j][i] = Figures.Pawn(side,", "Board.COLUMN_SIZE)] for i in range(0, Board.COLUMN_SIZE): for j in range(0, Board.ROW_SIZE): str_board[i *", "True) elif letter == 'b': self.board[j][i] = Figures.Bishop(side, cur_pos) elif letter == 'r':", "evaluate(self, side): total = 0 for j in range(Board.ROW_SIZE): for i in range(Board.COLUMN_SIZE):", "i == GameBoard.default_black_pawn_row: was_moved = False self.board[j][i] = Figures.Pawn(side, cur_pos, was_moved) else: continue", "is not None and figure.side == side_to_del: if isinstance(figure, Figures.Pawn): figure.double_move = False", "Figures.King(side, cur_pos, True) elif letter == 'b': self.board[j][i] = Figures.Bishop(side, cur_pos) elif letter", "FigureType.QUEEN: self.board[j][i] = Figures.Queen(side, cur_pos) elif figure_type == FigureType.ROOK: was_moved = True if", "in range(0, Board.COLUMN_SIZE): for j in range(0, Board.ROW_SIZE): str_board[i * Board.ROW_SIZE + j]", "range(0, Board.COLUMN_SIZE): for j in range(0, Board.ROW_SIZE): letter = str_board[i * Board.ROW_SIZE +", "FigureType.ROOK: latter = 'r' elif figure_type == FigureType.KNIGHT: latter = 'n' elif figure_type", "= Figures.Pawn(side, cur_pos, was_moved) else: continue def serialize_to_str(self): str_board = ['.' 
for j", "if figure is not None: if figure.side is side: sign = 1 else:", "j in range(0, Board.ROW_SIZE): letter = str_board[i * Board.ROW_SIZE + j] if letter.isupper():", "Board.COLUMN_SIZE): for j in range(0, Board.ROW_SIZE): if chess_board.board[j][i] is None: continue figure_type =", "self.board[j][i] = Figures.Pawn(side, cur_pos, True) elif letter == 'w': self.board[j][i] = Figures.Pawn(side, cur_pos,", "str_board[i] return res def deserialize_from_str(self, board_as_str): self.board = [[None for j in range(0,", "self.board[position.x][position.y] = game_object def get_king_cell(self, side): for i in range(0, Board.COLUMN_SIZE): for j", "False return True def is_that_stalemate(self, my_side): enemy_figures = self.get_figures_list(Side.get_oposite(my_side)) for i in range(len(enemy_figures)):", "== Side.WHITE: latter = latter.upper() export_board[i * Board.ROW_SIZE + j] = latter return", "Board.ROW_SIZE): letter = str_board[i * Board.ROW_SIZE + j] if letter.isupper(): side = Side.WHITE", "range(Board.COLUMN_SIZE): attacked_cells.clear() figure = self.get_by_pos(j, i) if figure is not None and figure.side", "new_chess_board.is_that_check(my_side) is False: return False return True def is_that_stalemate(self, my_side): enemy_figures = self.get_figures_list(Side.get_oposite(my_side))", "Board.ROW_SIZE): if self.board[j][i] is not None: if self.board[j][i].side == side: figures.append(self.board[j][i]) return figures", "j] = str(board_as_str).__getitem__(i * Board.ROW_SIZE + j) for i in range(0, Board.COLUMN_SIZE): for", "= Vector2d(4, 0) default_white_pawn_row = 6 default_black_pawn_row = 1 default_white_rook_right_pos = Vector2d(7, 7)", "= str(board_as_str).__getitem__(i * Board.ROW_SIZE + j) for i in range(0, Board.COLUMN_SIZE): for j", "for j in range(0, Board.ROW_SIZE): if self.board[j][i] is not None: self.board[j][i].print() sys.stdout.write(\" \")", "FigureType.QUEEN: latter = 'q' elif figure_type == FigureType.ROOK: latter = 'r' elif figure_type", "Vector2d(4, 0) default_white_pawn_row = 6 default_black_pawn_row = 1 default_white_rook_right_pos = Vector2d(7, 7) default_white_rook_left_pos", "= Vector2d(j, i) figure = self.get(pos) if figure is not None: if figure.side", "Side.WHITE: if i == GameBoard.default_white_pawn_row: was_moved = False elif side == Side.BLACK: if", "[[None for j in range(0, Board.ROW_SIZE)] for i in range(0, Board.COLUMN_SIZE)] str_board =", "-1 total = total + (figure.evaluate(j, i) * sign) return total def delete_double_move(self,", "letter == 'r': self.board[j][i] = Figures.Rook(side, cur_pos, False) elif letter == 'o': self.board[j][i]", "== GameBoard.default_black_rook_right_pos: was_moved = False self.board[j][i] = Figures.Rook(side, cur_pos, was_moved) elif figure_type ==", "summary_moves.append(Move(Vector2d(j, i), attacked_cells[k])) return summary_moves def is_that_check(self, my_side): attacked_cells = self.summary_attacked_cells(my_side) enemy_king_cell =", "in range(Board.COLUMN_SIZE): pos = Vector2d(j, i) figure = self.get(pos) if figure is not", "= letter.lower() cur_pos = Vector2d(j, i) if letter == 'k': self.board[j][i] = Figures.King(side,", "range(0, Board.COLUMN_SIZE): for j in range(0, Board.ROW_SIZE): if self.board[j][i] is None: continue str_board[i", "not None: if isinstance(self.board[j][i], Figures.King) and self.board[j][i].side == side: return Vector2d(j, i) def", "== side: if isinstance(figure, Figures.King): attacked_cells = attacked_cells + figure.generate_moves(self, False) elif isinstance(figure,", "export_board[i * 
Board.ROW_SIZE + j] = latter return export_board def print(self): sys.stdout.write(\" \")", "attacked_cells = attacked_cells + figure.generate_moves(self, False) elif isinstance(figure, Figures.Pawn): attacked_cells = attacked_cells +", "= total + (figure.evaluate(j, i) * sign) return total def delete_double_move(self, side_to_del): for", "= chess_board.board[j][i].side cur_pos = Vector2d(j, i) if figure_type == FigureType.KING: was_moved = True", "None and figure.side == side: if isinstance(figure, Figures.King): attacked_cells = attacked_cells + figure.generate_moves(self,", "range(0, Board.ROW_SIZE): str_board[i * Board.ROW_SIZE + j] = str(board_as_str).__getitem__(i * Board.ROW_SIZE + j)", "letter == 'a': self.board[j][i] = Figures.Pawn(side, cur_pos, True) elif letter == 'w': self.board[j][i]", "in range(0, Board.COLUMN_SIZE)] str_board = ['.' for j in range(0, Board.ROW_SIZE) for i", "for j in range(0, Board.ROW_SIZE): if self.board[j][i] is not None: attack_cells = self.board[j][i].generate_moves(self)", "if isinstance(figure, Figures.King): attacked_cells = attacked_cells + figure.generate_moves(self, False) elif isinstance(figure, Figures.Pawn): attacked_cells", "sys.stdout.write(\" \") sys.stdout.write(\" \") for j in range(0, Board.ROW_SIZE): if self.board[j][i] is not", "figure_type == FigureType.ROOK: latter = 'r' elif figure_type == FigureType.KNIGHT: latter = 'n'", "False) elif letter == 'i': self.board[j][i] = Figures.King(side, cur_pos, True) elif letter ==", "print_attacked_cells(self): for i in range(0, Board.COLUMN_SIZE): for j in range(0, Board.ROW_SIZE): if self.board[j][i]", "== side: return Vector2d(j, i) def get_figures_list(self, side): figures = [] for i", "= Figures.Pawn(side, cur_pos) elif letter == 'a': self.board[j][i] = Figures.Pawn(side, cur_pos, True) elif", "False, True) def export_chess_board(self): export_board = ['.' for j in range(0, Board.ROW_SIZE) for", "and figure.side == side: if isinstance(figure, Figures.King): attacked_cells = attacked_cells + figure.generate_moves(self, False)", "= -1 total = total + (figure.evaluate(j, i) * sign) return total def", "Board.ROW_SIZE): res += str_board[i] return res def deserialize_from_str(self, board_as_str): self.board = [[None for", "elif letter == 'q': self.board[j][i] = Figures.Queen(side, cur_pos) elif letter == 'p': self.board[j][i]", "or cur_pos == GameBoard.default_white_rook_right_pos: was_moved = False elif side == Side.BLACK: if cur_pos", "= self.summary_attacked_cells(my_side) enemy_king_cell = self.get_king_cell(Side.get_oposite(my_side)) return enemy_king_cell in attacked_cells def is_that_mate(self, my_side): enemy_figures", "= 'r' elif figure_type == FigureType.KNIGHT: latter = 'n' elif figure_type == FigureType.BISHOP:", "position) if lower == 'n': self.board[position.x][position.y] = Figures.Knight(side, position) if lower == 'r':", "self.summary_attacked_cells(my_side) enemy_king_cell = self.get_king_cell(Side.get_oposite(my_side)) return enemy_king_cell in attacked_cells def is_that_mate(self, my_side): enemy_figures =", "FigureType.KING: latter = 'k' elif figure_type == FigureType.QUEEN: latter = 'q' elif figure_type", "Board.ROW_SIZE + j) for i in range(0, Board.COLUMN_SIZE): for j in range(0, Board.ROW_SIZE):" ]
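The class above exposes enough for a simple search driver. The sketch below is not part of the recovered file: it assumes the class lives at ChessAI.GameController.game_board and that Board() constructs the usual starting position, and shows how summary_moves, make_move and evaluate compose into a one-ply greedy move chooser.

import copy

from ChessBoard.chess_board import Board                      # assumed start-position constructor
from ChessBoard.chess_figure import Side
from ChessAI.GameController.game_board import GameBoard       # assumed module path for the class above


def pick_greedy_move(game_board, side):
    """Return the pseudo-legal move for `side` with the best evaluate() score."""
    best_move, best_score = None, float("-inf")
    for move in game_board.summary_moves(side):
        candidate = copy.deepcopy(game_board)   # search on a copy, never the live board
        candidate.make_move(move)
        if candidate.is_that_check(Side.get_oposite(side)):
            continue                            # would leave our own king attacked
        score = candidate.evaluate(side)
        if score > best_score:
            best_move, best_score = move, score
    return best_move


if __name__ == "__main__":
    board = GameBoard(Board())                  # Board() assumed to set up a new game
    board.print()
    print(pick_greedy_move(board, Side.WHITE))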
[ "== \"__main__\": if len(sys.argv) < 2: print(f\"usage: python3 {sys.argv[0]} [pools file]\") sys.exit(0) denom_price_map", "<gh_stars>1-10 import csv import sys import requests def get_pools(): url = \"https://staging.demeris.io/v1/liquidity/cosmos/liquidity/v1beta1/pools\" r", "import sys import requests def get_pools(): url = \"https://staging.demeris.io/v1/liquidity/cosmos/liquidity/v1beta1/pools\" r = requests.get(url) return", "\"https://staging.demeris.io/v1/verified_denoms\" r = requests.get(url) return r.json()[\"verified_denoms\"] def get_prices(): url = \"https://staging.demeris.io/v1/oracle/prices\" r =", "= denom.removeprefix(\"ibc/\") url = \"https://staging.demeris.io/v1/chain/cosmos-hub/denom/verify_trace/\" + denom r = requests.get(url) if r.status_code !=", "denom r = requests.get(url) if r.status_code != 200: return None return r.json()[\"verify_trace\"] def", "200: return None return r.json()[\"verify_trace\"] def make_denom_price_map(): denom_set = set() for pool in", "+ addr r = requests.get(url) return r.json()[\"balances\"] def get_verified_denoms(): url = \"https://staging.demeris.io/v1/verified_denoms\" r", "return r.json()[\"data\"] def get_ibc_denom_info(denom): denom = denom.removeprefix(\"ibc/\") url = \"https://staging.demeris.io/v1/chain/cosmos-hub/denom/verify_trace/\" + denom r", "return r.json()[\"balances\"] def get_verified_denoms(): url = \"https://staging.demeris.io/v1/verified_denoms\" r = requests.get(url) return r.json()[\"verified_denoms\"] def", "requests.get(url) return r.json()[\"balances\"] def get_verified_denoms(): url = \"https://staging.demeris.io/v1/verified_denoms\" r = requests.get(url) return r.json()[\"verified_denoms\"]", "\"__main__\": if len(sys.argv) < 2: print(f\"usage: python3 {sys.argv[0]} [pools file]\") sys.exit(0) denom_price_map =", "tvl += vl print(f\"total value locked: {tvl}\") swap_amount = 0.0 fee_amount = 0.0", "= 0.0 with open(sys.argv[1], newline=\"\") as f: reader = csv.DictReader(f) for row in", "\"https://staging.demeris.io/v1/liquidity/cosmos/bank/v1beta1/balances/\" + addr r = requests.get(url) return r.json()[\"balances\"] def get_verified_denoms(): url = \"https://staging.demeris.io/v1/verified_denoms\"", "{} for denom in denom_set: if denom.startswith(\"ibc/\"): info = get_ibc_denom_info(denom) if not info:", "+= int(x[\"amount\"]) * denom_price_map[x[\"denom\"]] except KeyError: continue tvl += vl print(f\"total value locked:", "price_map = {x[\"Symbol\"]: x[\"Price\"] for x in prices[\"Tokens\"]} denom_price_map = {} for denom", "denom_price_map[denom] = price / pow(10, precision) return denom_price_map if __name__ == \"__main__\": if", "for pool in get_pools(): denom_set |= set(pool[\"reserve_coin_denoms\"]) verified_denoms = get_verified_denoms() verified_denom_map = {x[\"name\"]:", "pow(10, precision) return denom_price_map if __name__ == \"__main__\": if len(sys.argv) < 2: print(f\"usage:", "= verified_denom_map[base_denom] if not denom_data[\"fetch_price\"]: continue precision = denom_data[\"precision\"] ticker = denom_data[\"ticker\"] price", "r = requests.get(url) return r.json()[\"data\"] def get_ibc_denom_info(denom): denom = denom.removeprefix(\"ibc/\") url = \"https://staging.demeris.io/v1/chain/cosmos-hub/denom/verify_trace/\"", "tvl = 0.0 for pool in get_pools(): vl = 0.0 try: for x", "= denom_data[\"ticker\"] price = price_map[ticker + \"USDT\"] denom_price_map[denom] = price / pow(10, precision)", "r = requests.get(url) if r.status_code != 200: return None return r.json()[\"verify_trace\"] 
def make_denom_price_map():", "price / pow(10, precision) return denom_price_map if __name__ == \"__main__\": if len(sys.argv) <", "denom_set |= set(pool[\"reserve_coin_denoms\"]) verified_denoms = get_verified_denoms() verified_denom_map = {x[\"name\"]: x for x in", "x for x in verified_denoms} prices = get_prices() price_map = {x[\"Symbol\"]: x[\"Price\"] for", "0.0 fee_amount = 0.0 with open(sys.argv[1], newline=\"\") as f: reader = csv.DictReader(f) for", "try: x_price = denom_price_map[row[\"x_denom\"]] y_price = denom_price_map[row[\"y_denom\"]] except KeyError: continue swap_amount += int(row[\"offer_x\"])", "= 0.0 try: for x in get_balance(pool[\"reserve_account_address\"]): if x[\"denom\"] in pool[\"reserve_coin_denoms\"]: vl +=", "y_price = denom_price_map[row[\"y_denom\"]] except KeyError: continue swap_amount += int(row[\"offer_x\"]) * x_price swap_amount +=", "precision) return denom_price_map if __name__ == \"__main__\": if len(sys.argv) < 2: print(f\"usage: python3", "def make_denom_price_map(): denom_set = set() for pool in get_pools(): denom_set |= set(pool[\"reserve_coin_denoms\"]) verified_denoms", "if len(sys.argv) < 2: print(f\"usage: python3 {sys.argv[0]} [pools file]\") sys.exit(0) denom_price_map = make_denom_price_map()", "pool[\"reserve_coin_denoms\"]: vl += int(x[\"amount\"]) * denom_price_map[x[\"denom\"]] except KeyError: continue tvl += vl print(f\"total", "return r.json()[\"verified_denoms\"] def get_prices(): url = \"https://staging.demeris.io/v1/oracle/prices\" r = requests.get(url) return r.json()[\"data\"] def", "price_map[ticker + \"USDT\"] denom_price_map[denom] = price / pow(10, precision) return denom_price_map if __name__", "requests.get(url) return r.json()[\"pools\"] def get_balance(addr): url = \"https://staging.demeris.io/v1/liquidity/cosmos/bank/v1beta1/balances/\" + addr r = requests.get(url)", "= 0.0 for pool in get_pools(): vl = 0.0 try: for x in", "denom_set = set() for pool in get_pools(): denom_set |= set(pool[\"reserve_coin_denoms\"]) verified_denoms = get_verified_denoms()", "price = price_map[ticker + \"USDT\"] denom_price_map[denom] = price / pow(10, precision) return denom_price_map", "reader = csv.DictReader(f) for row in reader: try: x_price = denom_price_map[row[\"x_denom\"]] y_price =", "import requests def get_pools(): url = \"https://staging.demeris.io/v1/liquidity/cosmos/liquidity/v1beta1/pools\" r = requests.get(url) return r.json()[\"pools\"] def", "requests def get_pools(): url = \"https://staging.demeris.io/v1/liquidity/cosmos/liquidity/v1beta1/pools\" r = requests.get(url) return r.json()[\"pools\"] def get_balance(addr):", "pool in get_pools(): denom_set |= set(pool[\"reserve_coin_denoms\"]) verified_denoms = get_verified_denoms() verified_denom_map = {x[\"name\"]: x", "\"https://staging.demeris.io/v1/chain/cosmos-hub/denom/verify_trace/\" + denom r = requests.get(url) if r.status_code != 200: return None return", "\"USDT\"] denom_price_map[denom] = price / pow(10, precision) return denom_price_map if __name__ == \"__main__\":", "for x in get_balance(pool[\"reserve_account_address\"]): if x[\"denom\"] in pool[\"reserve_coin_denoms\"]: vl += int(x[\"amount\"]) * denom_price_map[x[\"denom\"]]", "__name__ == \"__main__\": if len(sys.argv) < 2: print(f\"usage: python3 {sys.argv[0]} [pools file]\") sys.exit(0)", "!= 200: return None return r.json()[\"verify_trace\"] def make_denom_price_map(): denom_set = set() for pool", "fee_amount += int(row[\"offer_x_fee\"]) * x_price fee_amount += int(row[\"demand_y_fee\"]) * y_price 
fee_amount += int(row[\"offer_y_fee\"])", "= price / pow(10, precision) return denom_price_map if __name__ == \"__main__\": if len(sys.argv)", "= denom_data[\"precision\"] ticker = denom_data[\"ticker\"] price = price_map[ticker + \"USDT\"] denom_price_map[denom] = price", "vl print(f\"total value locked: {tvl}\") swap_amount = 0.0 fee_amount = 0.0 with open(sys.argv[1],", "value locked: {tvl}\") swap_amount = 0.0 fee_amount = 0.0 with open(sys.argv[1], newline=\"\") as", "x in verified_denoms} prices = get_prices() price_map = {x[\"Symbol\"]: x[\"Price\"] for x in", "print(f\"usage: python3 {sys.argv[0]} [pools file]\") sys.exit(0) denom_price_map = make_denom_price_map() tvl = 0.0 for", "= make_denom_price_map() tvl = 0.0 for pool in get_pools(): vl = 0.0 try:", "pool in get_pools(): vl = 0.0 try: for x in get_balance(pool[\"reserve_account_address\"]): if x[\"denom\"]", "\"https://staging.demeris.io/v1/liquidity/cosmos/liquidity/v1beta1/pools\" r = requests.get(url) return r.json()[\"pools\"] def get_balance(addr): url = \"https://staging.demeris.io/v1/liquidity/cosmos/bank/v1beta1/balances/\" + addr", "for denom in denom_set: if denom.startswith(\"ibc/\"): info = get_ibc_denom_info(denom) if not info: continue", "= 0.0 fee_amount = 0.0 with open(sys.argv[1], newline=\"\") as f: reader = csv.DictReader(f)", "fee_amount += int(row[\"offer_y_fee\"]) * y_price fee_amount += int(row[\"demand_x_fee\"]) * x_price print(f\"total swapped amount:", "= get_ibc_denom_info(denom) if not info: continue base_denom = info[\"base_denom\"] else: base_denom = denom", "verified_denoms = get_verified_denoms() verified_denom_map = {x[\"name\"]: x for x in verified_denoms} prices =", "requests.get(url) return r.json()[\"data\"] def get_ibc_denom_info(denom): denom = denom.removeprefix(\"ibc/\") url = \"https://staging.demeris.io/v1/chain/cosmos-hub/denom/verify_trace/\" + denom", "not info: continue base_denom = info[\"base_denom\"] else: base_denom = denom denom_data = verified_denom_map[base_denom]", "+= vl print(f\"total value locked: {tvl}\") swap_amount = 0.0 fee_amount = 0.0 with", "\"https://staging.demeris.io/v1/oracle/prices\" r = requests.get(url) return r.json()[\"data\"] def get_ibc_denom_info(denom): denom = denom.removeprefix(\"ibc/\") url =", "+ denom r = requests.get(url) if r.status_code != 200: return None return r.json()[\"verify_trace\"]", "= requests.get(url) return r.json()[\"balances\"] def get_verified_denoms(): url = \"https://staging.demeris.io/v1/verified_denoms\" r = requests.get(url) return", "denom.removeprefix(\"ibc/\") url = \"https://staging.demeris.io/v1/chain/cosmos-hub/denom/verify_trace/\" + denom r = requests.get(url) if r.status_code != 200:", "= {} for denom in denom_set: if denom.startswith(\"ibc/\"): info = get_ibc_denom_info(denom) if not", "x in get_balance(pool[\"reserve_account_address\"]): if x[\"denom\"] in pool[\"reserve_coin_denoms\"]: vl += int(x[\"amount\"]) * denom_price_map[x[\"denom\"]] except", "int(row[\"demand_y_fee\"]) * y_price fee_amount += int(row[\"offer_y_fee\"]) * y_price fee_amount += int(row[\"demand_x_fee\"]) * x_price", "get_verified_denoms() verified_denom_map = {x[\"name\"]: x for x in verified_denoms} prices = get_prices() price_map", "= info[\"base_denom\"] else: base_denom = denom denom_data = verified_denom_map[base_denom] if not denom_data[\"fetch_price\"]: continue", "swap_amount += int(row[\"offer_y\"]) * y_price fee_amount += int(row[\"offer_x_fee\"]) * x_price fee_amount += int(row[\"demand_y_fee\"])", "+= int(row[\"offer_x\"]) * 
x_price swap_amount += int(row[\"offer_y\"]) * y_price fee_amount += int(row[\"offer_x_fee\"]) *", "except KeyError: continue swap_amount += int(row[\"offer_x\"]) * x_price swap_amount += int(row[\"offer_y\"]) * y_price", "* x_price swap_amount += int(row[\"offer_y\"]) * y_price fee_amount += int(row[\"offer_x_fee\"]) * x_price fee_amount", "< 2: print(f\"usage: python3 {sys.argv[0]} [pools file]\") sys.exit(0) denom_price_map = make_denom_price_map() tvl =", "def get_balance(addr): url = \"https://staging.demeris.io/v1/liquidity/cosmos/bank/v1beta1/balances/\" + addr r = requests.get(url) return r.json()[\"balances\"] def", "swap_amount = 0.0 fee_amount = 0.0 with open(sys.argv[1], newline=\"\") as f: reader =", "if denom.startswith(\"ibc/\"): info = get_ibc_denom_info(denom) if not info: continue base_denom = info[\"base_denom\"] else:", "denom in denom_set: if denom.startswith(\"ibc/\"): info = get_ibc_denom_info(denom) if not info: continue base_denom", "int(row[\"offer_x_fee\"]) * x_price fee_amount += int(row[\"demand_y_fee\"]) * y_price fee_amount += int(row[\"offer_y_fee\"]) * y_price", "csv.DictReader(f) for row in reader: try: x_price = denom_price_map[row[\"x_denom\"]] y_price = denom_price_map[row[\"y_denom\"]] except", "for pool in get_pools(): vl = 0.0 try: for x in get_balance(pool[\"reserve_account_address\"]): if", "def get_pools(): url = \"https://staging.demeris.io/v1/liquidity/cosmos/liquidity/v1beta1/pools\" r = requests.get(url) return r.json()[\"pools\"] def get_balance(addr): url", "get_ibc_denom_info(denom): denom = denom.removeprefix(\"ibc/\") url = \"https://staging.demeris.io/v1/chain/cosmos-hub/denom/verify_trace/\" + denom r = requests.get(url) if", "denom_data = verified_denom_map[base_denom] if not denom_data[\"fetch_price\"]: continue precision = denom_data[\"precision\"] ticker = denom_data[\"ticker\"]", "len(sys.argv) < 2: print(f\"usage: python3 {sys.argv[0]} [pools file]\") sys.exit(0) denom_price_map = make_denom_price_map() tvl", "in verified_denoms} prices = get_prices() price_map = {x[\"Symbol\"]: x[\"Price\"] for x in prices[\"Tokens\"]}", "fee_amount += int(row[\"demand_x_fee\"]) * x_price print(f\"total swapped amount: {swap_amount}\") print(f\"total fees paid: {fee_amount}\")", "* y_price fee_amount += int(row[\"demand_x_fee\"]) * x_price print(f\"total swapped amount: {swap_amount}\") print(f\"total fees", "= requests.get(url) if r.status_code != 200: return None return r.json()[\"verify_trace\"] def make_denom_price_map(): denom_set", "= get_verified_denoms() verified_denom_map = {x[\"name\"]: x for x in verified_denoms} prices = get_prices()", "reader: try: x_price = denom_price_map[row[\"x_denom\"]] y_price = denom_price_map[row[\"y_denom\"]] except KeyError: continue swap_amount +=", "return None return r.json()[\"verify_trace\"] def make_denom_price_map(): denom_set = set() for pool in get_pools():", "verified_denom_map = {x[\"name\"]: x for x in verified_denoms} prices = get_prices() price_map =", "in get_pools(): denom_set |= set(pool[\"reserve_coin_denoms\"]) verified_denoms = get_verified_denoms() verified_denom_map = {x[\"name\"]: x for", "r = requests.get(url) return r.json()[\"pools\"] def get_balance(addr): url = \"https://staging.demeris.io/v1/liquidity/cosmos/bank/v1beta1/balances/\" + addr r", "= denom denom_data = verified_denom_map[base_denom] if not denom_data[\"fetch_price\"]: continue precision = denom_data[\"precision\"] ticker", "csv import sys import requests def get_pools(): url = 
\"https://staging.demeris.io/v1/liquidity/cosmos/liquidity/v1beta1/pools\" r = requests.get(url)", "import csv import sys import requests def get_pools(): url = \"https://staging.demeris.io/v1/liquidity/cosmos/liquidity/v1beta1/pools\" r =", "url = \"https://staging.demeris.io/v1/liquidity/cosmos/liquidity/v1beta1/pools\" r = requests.get(url) return r.json()[\"pools\"] def get_balance(addr): url = \"https://staging.demeris.io/v1/liquidity/cosmos/bank/v1beta1/balances/\"", "verified_denoms} prices = get_prices() price_map = {x[\"Symbol\"]: x[\"Price\"] for x in prices[\"Tokens\"]} denom_price_map", "{sys.argv[0]} [pools file]\") sys.exit(0) denom_price_map = make_denom_price_map() tvl = 0.0 for pool in", "y_price fee_amount += int(row[\"demand_x_fee\"]) * x_price print(f\"total swapped amount: {swap_amount}\") print(f\"total fees paid:", "make_denom_price_map(): denom_set = set() for pool in get_pools(): denom_set |= set(pool[\"reserve_coin_denoms\"]) verified_denoms =", "denom_price_map = {} for denom in denom_set: if denom.startswith(\"ibc/\"): info = get_ibc_denom_info(denom) if", "denom_set: if denom.startswith(\"ibc/\"): info = get_ibc_denom_info(denom) if not info: continue base_denom = info[\"base_denom\"]", "addr r = requests.get(url) return r.json()[\"balances\"] def get_verified_denoms(): url = \"https://staging.demeris.io/v1/verified_denoms\" r =", "for x in prices[\"Tokens\"]} denom_price_map = {} for denom in denom_set: if denom.startswith(\"ibc/\"):", "int(x[\"amount\"]) * denom_price_map[x[\"denom\"]] except KeyError: continue tvl += vl print(f\"total value locked: {tvl}\")", "def get_verified_denoms(): url = \"https://staging.demeris.io/v1/verified_denoms\" r = requests.get(url) return r.json()[\"verified_denoms\"] def get_prices(): url", "get_verified_denoms(): url = \"https://staging.demeris.io/v1/verified_denoms\" r = requests.get(url) return r.json()[\"verified_denoms\"] def get_prices(): url =", "get_prices(): url = \"https://staging.demeris.io/v1/oracle/prices\" r = requests.get(url) return r.json()[\"data\"] def get_ibc_denom_info(denom): denom =", "info: continue base_denom = info[\"base_denom\"] else: base_denom = denom denom_data = verified_denom_map[base_denom] if", "continue base_denom = info[\"base_denom\"] else: base_denom = denom denom_data = verified_denom_map[base_denom] if not", "verified_denom_map[base_denom] if not denom_data[\"fetch_price\"]: continue precision = denom_data[\"precision\"] ticker = denom_data[\"ticker\"] price =", "* denom_price_map[x[\"denom\"]] except KeyError: continue tvl += vl print(f\"total value locked: {tvl}\") swap_amount", "if not info: continue base_denom = info[\"base_denom\"] else: base_denom = denom denom_data =", "ticker = denom_data[\"ticker\"] price = price_map[ticker + \"USDT\"] denom_price_map[denom] = price / pow(10,", "= \"https://staging.demeris.io/v1/liquidity/cosmos/liquidity/v1beta1/pools\" r = requests.get(url) return r.json()[\"pools\"] def get_balance(addr): url = \"https://staging.demeris.io/v1/liquidity/cosmos/bank/v1beta1/balances/\" +", "+= int(row[\"offer_y_fee\"]) * y_price fee_amount += int(row[\"demand_x_fee\"]) * x_price print(f\"total swapped amount: {swap_amount}\")", "= requests.get(url) return r.json()[\"verified_denoms\"] def get_prices(): url = \"https://staging.demeris.io/v1/oracle/prices\" r = requests.get(url) return", "= \"https://staging.demeris.io/v1/oracle/prices\" r = requests.get(url) return r.json()[\"data\"] def get_ibc_denom_info(denom): denom = denom.removeprefix(\"ibc/\") url", "r 
= requests.get(url) return r.json()[\"balances\"] def get_verified_denoms(): url = \"https://staging.demeris.io/v1/verified_denoms\" r = requests.get(url)", "get_balance(pool[\"reserve_account_address\"]): if x[\"denom\"] in pool[\"reserve_coin_denoms\"]: vl += int(x[\"amount\"]) * denom_price_map[x[\"denom\"]] except KeyError: continue", "row in reader: try: x_price = denom_price_map[row[\"x_denom\"]] y_price = denom_price_map[row[\"y_denom\"]] except KeyError: continue", "return r.json()[\"pools\"] def get_balance(addr): url = \"https://staging.demeris.io/v1/liquidity/cosmos/bank/v1beta1/balances/\" + addr r = requests.get(url) return", "{tvl}\") swap_amount = 0.0 fee_amount = 0.0 with open(sys.argv[1], newline=\"\") as f: reader", "{x[\"Symbol\"]: x[\"Price\"] for x in prices[\"Tokens\"]} denom_price_map = {} for denom in denom_set:", "0.0 with open(sys.argv[1], newline=\"\") as f: reader = csv.DictReader(f) for row in reader:", "for row in reader: try: x_price = denom_price_map[row[\"x_denom\"]] y_price = denom_price_map[row[\"y_denom\"]] except KeyError:", "+= int(row[\"offer_x_fee\"]) * x_price fee_amount += int(row[\"demand_y_fee\"]) * y_price fee_amount += int(row[\"offer_y_fee\"]) *", "/ pow(10, precision) return denom_price_map if __name__ == \"__main__\": if len(sys.argv) < 2:", "fee_amount = 0.0 with open(sys.argv[1], newline=\"\") as f: reader = csv.DictReader(f) for row", "f: reader = csv.DictReader(f) for row in reader: try: x_price = denom_price_map[row[\"x_denom\"]] y_price", "continue tvl += vl print(f\"total value locked: {tvl}\") swap_amount = 0.0 fee_amount =", "* y_price fee_amount += int(row[\"offer_x_fee\"]) * x_price fee_amount += int(row[\"demand_y_fee\"]) * y_price fee_amount", "else: base_denom = denom denom_data = verified_denom_map[base_denom] if not denom_data[\"fetch_price\"]: continue precision =", "requests.get(url) if r.status_code != 200: return None return r.json()[\"verify_trace\"] def make_denom_price_map(): denom_set =", "denom_price_map[row[\"x_denom\"]] y_price = denom_price_map[row[\"y_denom\"]] except KeyError: continue swap_amount += int(row[\"offer_x\"]) * x_price swap_amount", "prices[\"Tokens\"]} denom_price_map = {} for denom in denom_set: if denom.startswith(\"ibc/\"): info = get_ibc_denom_info(denom)", "KeyError: continue tvl += vl print(f\"total value locked: {tvl}\") swap_amount = 0.0 fee_amount", "in get_pools(): vl = 0.0 try: for x in get_balance(pool[\"reserve_account_address\"]): if x[\"denom\"] in", "= {x[\"Symbol\"]: x[\"Price\"] for x in prices[\"Tokens\"]} denom_price_map = {} for denom in", "python3 {sys.argv[0]} [pools file]\") sys.exit(0) denom_price_map = make_denom_price_map() tvl = 0.0 for pool", "y_price fee_amount += int(row[\"offer_y_fee\"]) * y_price fee_amount += int(row[\"demand_x_fee\"]) * x_price print(f\"total swapped", "set() for pool in get_pools(): denom_set |= set(pool[\"reserve_coin_denoms\"]) verified_denoms = get_verified_denoms() verified_denom_map =", "if __name__ == \"__main__\": if len(sys.argv) < 2: print(f\"usage: python3 {sys.argv[0]} [pools file]\")", "r = requests.get(url) return r.json()[\"verified_denoms\"] def get_prices(): url = \"https://staging.demeris.io/v1/oracle/prices\" r = requests.get(url)", "continue swap_amount += int(row[\"offer_x\"]) * x_price swap_amount += int(row[\"offer_y\"]) * y_price fee_amount +=", "get_balance(addr): url = \"https://staging.demeris.io/v1/liquidity/cosmos/bank/v1beta1/balances/\" + addr r = requests.get(url) return r.json()[\"balances\"] def 
get_verified_denoms():", "return r.json()[\"verify_trace\"] def make_denom_price_map(): denom_set = set() for pool in get_pools(): denom_set |=", "vl += int(x[\"amount\"]) * denom_price_map[x[\"denom\"]] except KeyError: continue tvl += vl print(f\"total value", "print(f\"total value locked: {tvl}\") swap_amount = 0.0 fee_amount = 0.0 with open(sys.argv[1], newline=\"\")", "denom = denom.removeprefix(\"ibc/\") url = \"https://staging.demeris.io/v1/chain/cosmos-hub/denom/verify_trace/\" + denom r = requests.get(url) if r.status_code", "denom_data[\"ticker\"] price = price_map[ticker + \"USDT\"] denom_price_map[denom] = price / pow(10, precision) return", "set(pool[\"reserve_coin_denoms\"]) verified_denoms = get_verified_denoms() verified_denom_map = {x[\"name\"]: x for x in verified_denoms} prices", "get_pools(): vl = 0.0 try: for x in get_balance(pool[\"reserve_account_address\"]): if x[\"denom\"] in pool[\"reserve_coin_denoms\"]:", "x[\"denom\"] in pool[\"reserve_coin_denoms\"]: vl += int(x[\"amount\"]) * denom_price_map[x[\"denom\"]] except KeyError: continue tvl +=", "= csv.DictReader(f) for row in reader: try: x_price = denom_price_map[row[\"x_denom\"]] y_price = denom_price_map[row[\"y_denom\"]]", "x_price swap_amount += int(row[\"offer_y\"]) * y_price fee_amount += int(row[\"offer_x_fee\"]) * x_price fee_amount +=", "get_ibc_denom_info(denom) if not info: continue base_denom = info[\"base_denom\"] else: base_denom = denom denom_data", "locked: {tvl}\") swap_amount = 0.0 fee_amount = 0.0 with open(sys.argv[1], newline=\"\") as f:", "+= int(row[\"offer_y\"]) * y_price fee_amount += int(row[\"offer_x_fee\"]) * x_price fee_amount += int(row[\"demand_y_fee\"]) *", "0.0 for pool in get_pools(): vl = 0.0 try: for x in get_balance(pool[\"reserve_account_address\"]):", "return denom_price_map if __name__ == \"__main__\": if len(sys.argv) < 2: print(f\"usage: python3 {sys.argv[0]}", "as f: reader = csv.DictReader(f) for row in reader: try: x_price = denom_price_map[row[\"x_denom\"]]", "[pools file]\") sys.exit(0) denom_price_map = make_denom_price_map() tvl = 0.0 for pool in get_pools():", "int(row[\"offer_y_fee\"]) * y_price fee_amount += int(row[\"demand_x_fee\"]) * x_price print(f\"total swapped amount: {swap_amount}\") print(f\"total", "* x_price fee_amount += int(row[\"demand_y_fee\"]) * y_price fee_amount += int(row[\"offer_y_fee\"]) * y_price fee_amount", "+= int(row[\"demand_y_fee\"]) * y_price fee_amount += int(row[\"offer_y_fee\"]) * y_price fee_amount += int(row[\"demand_x_fee\"]) *", "in pool[\"reserve_coin_denoms\"]: vl += int(x[\"amount\"]) * denom_price_map[x[\"denom\"]] except KeyError: continue tvl += vl", "= {x[\"name\"]: x for x in verified_denoms} prices = get_prices() price_map = {x[\"Symbol\"]:", "= price_map[ticker + \"USDT\"] denom_price_map[denom] = price / pow(10, precision) return denom_price_map if", "x in prices[\"Tokens\"]} denom_price_map = {} for denom in denom_set: if denom.startswith(\"ibc/\"): info", "x_price fee_amount += int(row[\"demand_y_fee\"]) * y_price fee_amount += int(row[\"offer_y_fee\"]) * y_price fee_amount +=", "{x[\"name\"]: x for x in verified_denoms} prices = get_prices() price_map = {x[\"Symbol\"]: x[\"Price\"]", "int(row[\"offer_x\"]) * x_price swap_amount += int(row[\"offer_y\"]) * y_price fee_amount += int(row[\"offer_x_fee\"]) * x_price", "info = get_ibc_denom_info(denom) if not info: continue base_denom = info[\"base_denom\"] else: base_denom =", "file]\") sys.exit(0) denom_price_map = make_denom_price_map() tvl = 0.0 for pool in 
get_pools(): vl", "def get_prices(): url = \"https://staging.demeris.io/v1/oracle/prices\" r = requests.get(url) return r.json()[\"data\"] def get_ibc_denom_info(denom): denom", "x[\"Price\"] for x in prices[\"Tokens\"]} denom_price_map = {} for denom in denom_set: if", "= denom_price_map[row[\"x_denom\"]] y_price = denom_price_map[row[\"y_denom\"]] except KeyError: continue swap_amount += int(row[\"offer_x\"]) * x_price", "def get_ibc_denom_info(denom): denom = denom.removeprefix(\"ibc/\") url = \"https://staging.demeris.io/v1/chain/cosmos-hub/denom/verify_trace/\" + denom r = requests.get(url)", "fee_amount += int(row[\"demand_y_fee\"]) * y_price fee_amount += int(row[\"offer_y_fee\"]) * y_price fee_amount += int(row[\"demand_x_fee\"])", "get_prices() price_map = {x[\"Symbol\"]: x[\"Price\"] for x in prices[\"Tokens\"]} denom_price_map = {} for", "url = \"https://staging.demeris.io/v1/chain/cosmos-hub/denom/verify_trace/\" + denom r = requests.get(url) if r.status_code != 200: return", "r.status_code != 200: return None return r.json()[\"verify_trace\"] def make_denom_price_map(): denom_set = set() for", "url = \"https://staging.demeris.io/v1/liquidity/cosmos/bank/v1beta1/balances/\" + addr r = requests.get(url) return r.json()[\"balances\"] def get_verified_denoms(): url", "get_pools(): denom_set |= set(pool[\"reserve_coin_denoms\"]) verified_denoms = get_verified_denoms() verified_denom_map = {x[\"name\"]: x for x", "precision = denom_data[\"precision\"] ticker = denom_data[\"ticker\"] price = price_map[ticker + \"USDT\"] denom_price_map[denom] =", "make_denom_price_map() tvl = 0.0 for pool in get_pools(): vl = 0.0 try: for", "for x in verified_denoms} prices = get_prices() price_map = {x[\"Symbol\"]: x[\"Price\"] for x", "+ \"USDT\"] denom_price_map[denom] = price / pow(10, precision) return denom_price_map if __name__ ==", "if x[\"denom\"] in pool[\"reserve_coin_denoms\"]: vl += int(x[\"amount\"]) * denom_price_map[x[\"denom\"]] except KeyError: continue tvl", "in reader: try: x_price = denom_price_map[row[\"x_denom\"]] y_price = denom_price_map[row[\"y_denom\"]] except KeyError: continue swap_amount", "int(row[\"offer_y\"]) * y_price fee_amount += int(row[\"offer_x_fee\"]) * x_price fee_amount += int(row[\"demand_y_fee\"]) * y_price", "None return r.json()[\"verify_trace\"] def make_denom_price_map(): denom_set = set() for pool in get_pools(): denom_set", "if not denom_data[\"fetch_price\"]: continue precision = denom_data[\"precision\"] ticker = denom_data[\"ticker\"] price = price_map[ticker", "KeyError: continue swap_amount += int(row[\"offer_x\"]) * x_price swap_amount += int(row[\"offer_y\"]) * y_price fee_amount", "x_price = denom_price_map[row[\"x_denom\"]] y_price = denom_price_map[row[\"y_denom\"]] except KeyError: continue swap_amount += int(row[\"offer_x\"]) *", "denom_data[\"fetch_price\"]: continue precision = denom_data[\"precision\"] ticker = denom_data[\"ticker\"] price = price_map[ticker + \"USDT\"]", "denom_data[\"precision\"] ticker = denom_data[\"ticker\"] price = price_map[ticker + \"USDT\"] denom_price_map[denom] = price /", "= get_prices() price_map = {x[\"Symbol\"]: x[\"Price\"] for x in prices[\"Tokens\"]} denom_price_map = {}", "denom_price_map[row[\"y_denom\"]] except KeyError: continue swap_amount += int(row[\"offer_x\"]) * x_price swap_amount += int(row[\"offer_y\"]) *", "= requests.get(url) return r.json()[\"pools\"] def get_balance(addr): url = \"https://staging.demeris.io/v1/liquidity/cosmos/bank/v1beta1/balances/\" + addr r =", "r.json()[\"data\"] 
def get_ibc_denom_info(denom): denom = denom.removeprefix(\"ibc/\") url = \"https://staging.demeris.io/v1/chain/cosmos-hub/denom/verify_trace/\" + denom r =", "in get_balance(pool[\"reserve_account_address\"]): if x[\"denom\"] in pool[\"reserve_coin_denoms\"]: vl += int(x[\"amount\"]) * denom_price_map[x[\"denom\"]] except KeyError:", "0.0 try: for x in get_balance(pool[\"reserve_account_address\"]): if x[\"denom\"] in pool[\"reserve_coin_denoms\"]: vl += int(x[\"amount\"])", "= \"https://staging.demeris.io/v1/chain/cosmos-hub/denom/verify_trace/\" + denom r = requests.get(url) if r.status_code != 200: return None", "denom_price_map[x[\"denom\"]] except KeyError: continue tvl += vl print(f\"total value locked: {tvl}\") swap_amount =", "y_price fee_amount += int(row[\"offer_x_fee\"]) * x_price fee_amount += int(row[\"demand_y_fee\"]) * y_price fee_amount +=", "in prices[\"Tokens\"]} denom_price_map = {} for denom in denom_set: if denom.startswith(\"ibc/\"): info =", "url = \"https://staging.demeris.io/v1/oracle/prices\" r = requests.get(url) return r.json()[\"data\"] def get_ibc_denom_info(denom): denom = denom.removeprefix(\"ibc/\")", "denom_price_map = make_denom_price_map() tvl = 0.0 for pool in get_pools(): vl = 0.0", "= denom_price_map[row[\"y_denom\"]] except KeyError: continue swap_amount += int(row[\"offer_x\"]) * x_price swap_amount += int(row[\"offer_y\"])", "denom_price_map if __name__ == \"__main__\": if len(sys.argv) < 2: print(f\"usage: python3 {sys.argv[0]} [pools", "r.json()[\"verify_trace\"] def make_denom_price_map(): denom_set = set() for pool in get_pools(): denom_set |= set(pool[\"reserve_coin_denoms\"])", "info[\"base_denom\"] else: base_denom = denom denom_data = verified_denom_map[base_denom] if not denom_data[\"fetch_price\"]: continue precision", "base_denom = denom denom_data = verified_denom_map[base_denom] if not denom_data[\"fetch_price\"]: continue precision = denom_data[\"precision\"]", "= \"https://staging.demeris.io/v1/liquidity/cosmos/bank/v1beta1/balances/\" + addr r = requests.get(url) return r.json()[\"balances\"] def get_verified_denoms(): url =", "r.json()[\"pools\"] def get_balance(addr): url = \"https://staging.demeris.io/v1/liquidity/cosmos/bank/v1beta1/balances/\" + addr r = requests.get(url) return r.json()[\"balances\"]", "swap_amount += int(row[\"offer_x\"]) * x_price swap_amount += int(row[\"offer_y\"]) * y_price fee_amount += int(row[\"offer_x_fee\"])", "|= set(pool[\"reserve_coin_denoms\"]) verified_denoms = get_verified_denoms() verified_denom_map = {x[\"name\"]: x for x in verified_denoms}", "get_pools(): url = \"https://staging.demeris.io/v1/liquidity/cosmos/liquidity/v1beta1/pools\" r = requests.get(url) return r.json()[\"pools\"] def get_balance(addr): url =", "prices = get_prices() price_map = {x[\"Symbol\"]: x[\"Price\"] for x in prices[\"Tokens\"]} denom_price_map =", "except KeyError: continue tvl += vl print(f\"total value locked: {tvl}\") swap_amount = 0.0", "r.json()[\"verified_denoms\"] def get_prices(): url = \"https://staging.demeris.io/v1/oracle/prices\" r = requests.get(url) return r.json()[\"data\"] def get_ibc_denom_info(denom):", "if r.status_code != 200: return None return r.json()[\"verify_trace\"] def make_denom_price_map(): denom_set = set()", "= requests.get(url) return r.json()[\"data\"] def get_ibc_denom_info(denom): denom = denom.removeprefix(\"ibc/\") url = \"https://staging.demeris.io/v1/chain/cosmos-hub/denom/verify_trace/\" +", "r.json()[\"balances\"] def get_verified_denoms(): url = 
\"https://staging.demeris.io/v1/verified_denoms\" r = requests.get(url) return r.json()[\"verified_denoms\"] def get_prices():", "vl = 0.0 try: for x in get_balance(pool[\"reserve_account_address\"]): if x[\"denom\"] in pool[\"reserve_coin_denoms\"]: vl", "= set() for pool in get_pools(): denom_set |= set(pool[\"reserve_coin_denoms\"]) verified_denoms = get_verified_denoms() verified_denom_map", "url = \"https://staging.demeris.io/v1/verified_denoms\" r = requests.get(url) return r.json()[\"verified_denoms\"] def get_prices(): url = \"https://staging.demeris.io/v1/oracle/prices\"", "not denom_data[\"fetch_price\"]: continue precision = denom_data[\"precision\"] ticker = denom_data[\"ticker\"] price = price_map[ticker +", "base_denom = info[\"base_denom\"] else: base_denom = denom denom_data = verified_denom_map[base_denom] if not denom_data[\"fetch_price\"]:", "requests.get(url) return r.json()[\"verified_denoms\"] def get_prices(): url = \"https://staging.demeris.io/v1/oracle/prices\" r = requests.get(url) return r.json()[\"data\"]", "denom.startswith(\"ibc/\"): info = get_ibc_denom_info(denom) if not info: continue base_denom = info[\"base_denom\"] else: base_denom", "open(sys.argv[1], newline=\"\") as f: reader = csv.DictReader(f) for row in reader: try: x_price", "continue precision = denom_data[\"precision\"] ticker = denom_data[\"ticker\"] price = price_map[ticker + \"USDT\"] denom_price_map[denom]", "2: print(f\"usage: python3 {sys.argv[0]} [pools file]\") sys.exit(0) denom_price_map = make_denom_price_map() tvl = 0.0", "try: for x in get_balance(pool[\"reserve_account_address\"]): if x[\"denom\"] in pool[\"reserve_coin_denoms\"]: vl += int(x[\"amount\"]) *", "newline=\"\") as f: reader = csv.DictReader(f) for row in reader: try: x_price =", "in denom_set: if denom.startswith(\"ibc/\"): info = get_ibc_denom_info(denom) if not info: continue base_denom =", "with open(sys.argv[1], newline=\"\") as f: reader = csv.DictReader(f) for row in reader: try:", "sys.exit(0) denom_price_map = make_denom_price_map() tvl = 0.0 for pool in get_pools(): vl =", "* y_price fee_amount += int(row[\"offer_y_fee\"]) * y_price fee_amount += int(row[\"demand_x_fee\"]) * x_price print(f\"total", "denom denom_data = verified_denom_map[base_denom] if not denom_data[\"fetch_price\"]: continue precision = denom_data[\"precision\"] ticker =", "= \"https://staging.demeris.io/v1/verified_denoms\" r = requests.get(url) return r.json()[\"verified_denoms\"] def get_prices(): url = \"https://staging.demeris.io/v1/oracle/prices\" r", "sys import requests def get_pools(): url = \"https://staging.demeris.io/v1/liquidity/cosmos/liquidity/v1beta1/pools\" r = requests.get(url) return r.json()[\"pools\"]" ]
[ "proteomicsData = pd.read_csv(proteomicsLoc) resultsPath = '%s/%s' %(scriptLoc, resultsFolder) if not (os.path.isdir(resultsPath)): os.mkdir(resultsPath) os.chdir(resultsPath)", "<NAME>i See the supplementary material of the paper for the derivation of the", "the supplementary material of the paper for the derivation of the constraint \"\"\"", "scriptLoc = sys.argv[3] proteomicsLoc = sys.argv[4] resultsFolder = sys.argv[5] model = cbmpy.CBRead.readSBML3FBC(modelLoc, scan_notes_gpr", "operator = '<=', rhs = protSum) os.chdir(resultsPath) \"\"\" Here, we define the multiplier", "resultsFolder) if not (os.path.isdir(resultsPath)): os.mkdir(resultsPath) os.chdir(resultsPath) \"\"\" Total protein volume constraint for <NAME>i", "nutrients in the growth medium. We will use this to perform glucose (and", "numpy as np import os import sys import pandas as pd import re", "for the derivation of the constraint \"\"\" protSum=float(0.62/0.34) pID = 'UP000000625' constraint =", "supplemented MOPS variants) limitation simulations. \"\"\" multiplier = 1.0 #No changes in Glc", "variants) limitation simulations. \"\"\" multiplier = 1.0 #No changes in Glc abundance for", "the paper for the derivation of the constraint \"\"\" protSum=float(0.62/0.34) pID = 'UP000000625'", "proteomicsLoc = sys.argv[4] resultsFolder = sys.argv[5] model = cbmpy.CBRead.readSBML3FBC(modelLoc, scan_notes_gpr = False) growthData", "multiplier * growthData['Lower Bound'].loc[growthData['Reaction ID']==i].values[0]) fbaResult = cbmpy.CBCPLEX.cplx_analyzeModel(model) fva = cbmpy.CBCPLEX.cplx_FluxVariabilityAnalysis(model, pre_opt=True) cbmpy.CBWrite.writeFVAdata(fva[0],", "* growthData['Lower Bound'].loc[growthData['Reaction ID']==i].values[0]) fbaResult = cbmpy.CBCPLEX.cplx_analyzeModel(model) fva = cbmpy.CBCPLEX.cplx_FluxVariabilityAnalysis(model, pre_opt=True) cbmpy.CBWrite.writeFVAdata(fva[0], fva[1],", "False) growthData = pd.read_csv(growthMediumLoc) proteomicsData = pd.read_csv(proteomicsLoc) resultsPath = '%s/%s' %(scriptLoc, resultsFolder) if", "= 1.0 #No changes in Glc abundance for i in growthData['Reaction ID']: model.setReactionLowerBound(i,", "\"\"\" multiplier = 1.0 #No changes in Glc abundance for i in growthData['Reaction", "model = cbmpy.CBRead.readSBML3FBC(modelLoc, scan_notes_gpr = False) growthData = pd.read_csv(growthMediumLoc) proteomicsData = pd.read_csv(proteomicsLoc) resultsPath", "UniProtIDs.index: constraint.append([(7.3*pow(10,-4)*float(UniProtIDs['Mass'][entry].replace(',',''))), 'P_%s_synthesis' %(UniProtIDs['Entry'][entry])]) model.addUserConstraint(pid = None, fluxes = constraint, operator = '<=',", "abundance for i in growthData['Reaction ID']: model.setReactionLowerBound(i, multiplier * growthData['Lower Bound'].loc[growthData['Reaction ID']==i].values[0]) fbaResult", "pandas as pd import re modelLoc = sys.argv[1] growthMediumLoc = sys.argv[2] scriptLoc =", "growthData = pd.read_csv(growthMediumLoc) proteomicsData = pd.read_csv(proteomicsLoc) resultsPath = '%s/%s' %(scriptLoc, resultsFolder) if not", "constraint, operator = '<=', rhs = protSum) os.chdir(resultsPath) \"\"\" Here, we define the", "use this to perform glucose (and amino acid, for the supplemented MOPS variants)", "'%s/%s' %(scriptLoc, resultsFolder) if not (os.path.isdir(resultsPath)): os.mkdir(resultsPath) os.chdir(resultsPath) \"\"\" Total protein volume constraint", "sep = '\\t') for entry in UniProtIDs.index: constraint.append([(7.3*pow(10,-4)*float(UniProtIDs['Mass'][entry].replace(',',''))), 'P_%s_synthesis' 
%(UniProtIDs['Entry'][entry])]) model.addUserConstraint(pid = None,", "\"\"\" protSum=float(0.62/0.34) pID = 'UP000000625' constraint = [] UniProtIDs = pd.read_csv('proteinMasses.txt', sep =", "the concentrations of nutrients in the growth medium. We will use this to", "(os.path.isdir(resultsPath)): os.mkdir(resultsPath) os.chdir(resultsPath) \"\"\" Total protein volume constraint for <NAME>i See the supplementary", "= False) growthData = pd.read_csv(growthMediumLoc) proteomicsData = pd.read_csv(proteomicsLoc) resultsPath = '%s/%s' %(scriptLoc, resultsFolder)", "= [] UniProtIDs = pd.read_csv('proteinMasses.txt', sep = '\\t') for entry in UniProtIDs.index: constraint.append([(7.3*pow(10,-4)*float(UniProtIDs['Mass'][entry].replace(',',''))),", "limitation simulations. \"\"\" multiplier = 1.0 #No changes in Glc abundance for i", "sys.argv[5] model = cbmpy.CBRead.readSBML3FBC(modelLoc, scan_notes_gpr = False) growthData = pd.read_csv(growthMediumLoc) proteomicsData = pd.read_csv(proteomicsLoc)", "pd.read_csv(proteomicsLoc) resultsPath = '%s/%s' %(scriptLoc, resultsFolder) if not (os.path.isdir(resultsPath)): os.mkdir(resultsPath) os.chdir(resultsPath) \"\"\" Total", "growthData['Reaction ID']: model.setReactionLowerBound(i, multiplier * growthData['Lower Bound'].loc[growthData['Reaction ID']==i].values[0]) fbaResult = cbmpy.CBCPLEX.cplx_analyzeModel(model) fva =", "will use this to perform glucose (and amino acid, for the supplemented MOPS", "%(scriptLoc, resultsFolder) if not (os.path.isdir(resultsPath)): os.mkdir(resultsPath) os.chdir(resultsPath) \"\"\" Total protein volume constraint for", "supplementary material of the paper for the derivation of the constraint \"\"\" protSum=float(0.62/0.34)", "Total protein volume constraint for <NAME>i See the supplementary material of the paper", "growth medium. We will use this to perform glucose (and amino acid, for", "'\\t') for entry in UniProtIDs.index: constraint.append([(7.3*pow(10,-4)*float(UniProtIDs['Mass'][entry].replace(',',''))), 'P_%s_synthesis' %(UniProtIDs['Entry'][entry])]) model.addUserConstraint(pid = None, fluxes =", "the multiplier for the concentrations of nutrients in the growth medium. We will", "See the supplementary material of the paper for the derivation of the constraint", "import sys import pandas as pd import re modelLoc = sys.argv[1] growthMediumLoc =", "(and amino acid, for the supplemented MOPS variants) limitation simulations. \"\"\" multiplier =", "amino acid, for the supplemented MOPS variants) limitation simulations. 
\"\"\" multiplier = 1.0", "sys.argv[1] growthMediumLoc = sys.argv[2] scriptLoc = sys.argv[3] proteomicsLoc = sys.argv[4] resultsFolder = sys.argv[5]", "= pd.read_csv(proteomicsLoc) resultsPath = '%s/%s' %(scriptLoc, resultsFolder) if not (os.path.isdir(resultsPath)): os.mkdir(resultsPath) os.chdir(resultsPath) \"\"\"", "protein volume constraint for <NAME>i See the supplementary material of the paper for", "Bound'].loc[growthData['Reaction ID']==i].values[0]) fbaResult = cbmpy.CBCPLEX.cplx_analyzeModel(model) fva = cbmpy.CBCPLEX.cplx_FluxVariabilityAnalysis(model, pre_opt=True) cbmpy.CBWrite.writeFVAdata(fva[0], fva[1], 'glcTitration_%s_%.2f.csv' %(os.path.split(growthMediumLoc)[1].replace('.csv',", "fbaResult = cbmpy.CBCPLEX.cplx_analyzeModel(model) fva = cbmpy.CBCPLEX.cplx_FluxVariabilityAnalysis(model, pre_opt=True) cbmpy.CBWrite.writeFVAdata(fva[0], fva[1], 'glcTitration_%s_%.2f.csv' %(os.path.split(growthMediumLoc)[1].replace('.csv', ''), j))", "os.mkdir(resultsPath) os.chdir(resultsPath) \"\"\" Total protein volume constraint for <NAME>i See the supplementary material", "in growthData['Reaction ID']: model.setReactionLowerBound(i, multiplier * growthData['Lower Bound'].loc[growthData['Reaction ID']==i].values[0]) fbaResult = cbmpy.CBCPLEX.cplx_analyzeModel(model) fva", "in the growth medium. We will use this to perform glucose (and amino", "model.setReactionLowerBound(i, multiplier * growthData['Lower Bound'].loc[growthData['Reaction ID']==i].values[0]) fbaResult = cbmpy.CBCPLEX.cplx_analyzeModel(model) fva = cbmpy.CBCPLEX.cplx_FluxVariabilityAnalysis(model, pre_opt=True)", "constraint.append([(7.3*pow(10,-4)*float(UniProtIDs['Mass'][entry].replace(',',''))), 'P_%s_synthesis' %(UniProtIDs['Entry'][entry])]) model.addUserConstraint(pid = None, fluxes = constraint, operator = '<=', rhs", "'<=', rhs = protSum) os.chdir(resultsPath) \"\"\" Here, we define the multiplier for the", "MOPS variants) limitation simulations. \"\"\" multiplier = 1.0 #No changes in Glc abundance", "acid, for the supplemented MOPS variants) limitation simulations. 
\"\"\" multiplier = 1.0 #No", "material of the paper for the derivation of the constraint \"\"\" protSum=float(0.62/0.34) pID", "= 'UP000000625' constraint = [] UniProtIDs = pd.read_csv('proteinMasses.txt', sep = '\\t') for entry", "\"\"\" Here, we define the multiplier for the concentrations of nutrients in the", "i in growthData['Reaction ID']: model.setReactionLowerBound(i, multiplier * growthData['Lower Bound'].loc[growthData['Reaction ID']==i].values[0]) fbaResult = cbmpy.CBCPLEX.cplx_analyzeModel(model)", "ID']: model.setReactionLowerBound(i, multiplier * growthData['Lower Bound'].loc[growthData['Reaction ID']==i].values[0]) fbaResult = cbmpy.CBCPLEX.cplx_analyzeModel(model) fva = cbmpy.CBCPLEX.cplx_FluxVariabilityAnalysis(model,", "rhs = protSum) os.chdir(resultsPath) \"\"\" Here, we define the multiplier for the concentrations", "protSum) os.chdir(resultsPath) \"\"\" Here, we define the multiplier for the concentrations of nutrients", "import re modelLoc = sys.argv[1] growthMediumLoc = sys.argv[2] scriptLoc = sys.argv[3] proteomicsLoc =", "in UniProtIDs.index: constraint.append([(7.3*pow(10,-4)*float(UniProtIDs['Mass'][entry].replace(',',''))), 'P_%s_synthesis' %(UniProtIDs['Entry'][entry])]) model.addUserConstraint(pid = None, fluxes = constraint, operator =", "if not (os.path.isdir(resultsPath)): os.mkdir(resultsPath) os.chdir(resultsPath) \"\"\" Total protein volume constraint for <NAME>i See", "= sys.argv[3] proteomicsLoc = sys.argv[4] resultsFolder = sys.argv[5] model = cbmpy.CBRead.readSBML3FBC(modelLoc, scan_notes_gpr =", "= cbmpy.CBRead.readSBML3FBC(modelLoc, scan_notes_gpr = False) growthData = pd.read_csv(growthMediumLoc) proteomicsData = pd.read_csv(proteomicsLoc) resultsPath =", "growthMediumLoc = sys.argv[2] scriptLoc = sys.argv[3] proteomicsLoc = sys.argv[4] resultsFolder = sys.argv[5] model", "for <NAME>i See the supplementary material of the paper for the derivation of", "= sys.argv[5] model = cbmpy.CBRead.readSBML3FBC(modelLoc, scan_notes_gpr = False) growthData = pd.read_csv(growthMediumLoc) proteomicsData =", "\"\"\" Total protein volume constraint for <NAME>i See the supplementary material of the", "import pandas as pd import re modelLoc = sys.argv[1] growthMediumLoc = sys.argv[2] scriptLoc", "UniProtIDs = pd.read_csv('proteinMasses.txt', sep = '\\t') for entry in UniProtIDs.index: constraint.append([(7.3*pow(10,-4)*float(UniProtIDs['Mass'][entry].replace(',',''))), 'P_%s_synthesis' %(UniProtIDs['Entry'][entry])])", "import cbmpy import numpy as np import os import sys import pandas as", "entry in UniProtIDs.index: constraint.append([(7.3*pow(10,-4)*float(UniProtIDs['Mass'][entry].replace(',',''))), 'P_%s_synthesis' %(UniProtIDs['Entry'][entry])]) model.addUserConstraint(pid = None, fluxes = constraint, operator", "define the multiplier for the concentrations of nutrients in the growth medium. We", "import os import sys import pandas as pd import re modelLoc = sys.argv[1]", "glucose (and amino acid, for the supplemented MOPS variants) limitation simulations. 
\"\"\" multiplier", "sys import pandas as pd import re modelLoc = sys.argv[1] growthMediumLoc = sys.argv[2]", "Here, we define the multiplier for the concentrations of nutrients in the growth", "#No changes in Glc abundance for i in growthData['Reaction ID']: model.setReactionLowerBound(i, multiplier *", "pd.read_csv('proteinMasses.txt', sep = '\\t') for entry in UniProtIDs.index: constraint.append([(7.3*pow(10,-4)*float(UniProtIDs['Mass'][entry].replace(',',''))), 'P_%s_synthesis' %(UniProtIDs['Entry'][entry])]) model.addUserConstraint(pid =", "constraint = [] UniProtIDs = pd.read_csv('proteinMasses.txt', sep = '\\t') for entry in UniProtIDs.index:", "pID = 'UP000000625' constraint = [] UniProtIDs = pd.read_csv('proteinMasses.txt', sep = '\\t') for", "Glc abundance for i in growthData['Reaction ID']: model.setReactionLowerBound(i, multiplier * growthData['Lower Bound'].loc[growthData['Reaction ID']==i].values[0])", "in Glc abundance for i in growthData['Reaction ID']: model.setReactionLowerBound(i, multiplier * growthData['Lower Bound'].loc[growthData['Reaction", "scan_notes_gpr = False) growthData = pd.read_csv(growthMediumLoc) proteomicsData = pd.read_csv(proteomicsLoc) resultsPath = '%s/%s' %(scriptLoc,", "We will use this to perform glucose (and amino acid, for the supplemented", "re modelLoc = sys.argv[1] growthMediumLoc = sys.argv[2] scriptLoc = sys.argv[3] proteomicsLoc = sys.argv[4]", "sys.argv[2] scriptLoc = sys.argv[3] proteomicsLoc = sys.argv[4] resultsFolder = sys.argv[5] model = cbmpy.CBRead.readSBML3FBC(modelLoc,", "= sys.argv[4] resultsFolder = sys.argv[5] model = cbmpy.CBRead.readSBML3FBC(modelLoc, scan_notes_gpr = False) growthData =", "sys.argv[4] resultsFolder = sys.argv[5] model = cbmpy.CBRead.readSBML3FBC(modelLoc, scan_notes_gpr = False) growthData = pd.read_csv(growthMediumLoc)", "of the constraint \"\"\" protSum=float(0.62/0.34) pID = 'UP000000625' constraint = [] UniProtIDs =", "ID']==i].values[0]) fbaResult = cbmpy.CBCPLEX.cplx_analyzeModel(model) fva = cbmpy.CBCPLEX.cplx_FluxVariabilityAnalysis(model, pre_opt=True) cbmpy.CBWrite.writeFVAdata(fva[0], fva[1], 'glcTitration_%s_%.2f.csv' %(os.path.split(growthMediumLoc)[1].replace('.csv', ''),", "fluxes = constraint, operator = '<=', rhs = protSum) os.chdir(resultsPath) \"\"\" Here, we", "cbmpy import numpy as np import os import sys import pandas as pd", "os.chdir(resultsPath) \"\"\" Total protein volume constraint for <NAME>i See the supplementary material of", "constraint for <NAME>i See the supplementary material of the paper for the derivation", "volume constraint for <NAME>i See the supplementary material of the paper for the", "[] UniProtIDs = pd.read_csv('proteinMasses.txt', sep = '\\t') for entry in UniProtIDs.index: constraint.append([(7.3*pow(10,-4)*float(UniProtIDs['Mass'][entry].replace(',',''))), 'P_%s_synthesis'", "1.0 #No changes in Glc abundance for i in growthData['Reaction ID']: model.setReactionLowerBound(i, multiplier", "simulations. 
\"\"\" multiplier = 1.0 #No changes in Glc abundance for i in", "= protSum) os.chdir(resultsPath) \"\"\" Here, we define the multiplier for the concentrations of", "os import sys import pandas as pd import re modelLoc = sys.argv[1] growthMediumLoc", "= sys.argv[1] growthMediumLoc = sys.argv[2] scriptLoc = sys.argv[3] proteomicsLoc = sys.argv[4] resultsFolder =", "the derivation of the constraint \"\"\" protSum=float(0.62/0.34) pID = 'UP000000625' constraint = []", "this to perform glucose (and amino acid, for the supplemented MOPS variants) limitation", "as np import os import sys import pandas as pd import re modelLoc", "resultsPath = '%s/%s' %(scriptLoc, resultsFolder) if not (os.path.isdir(resultsPath)): os.mkdir(resultsPath) os.chdir(resultsPath) \"\"\" Total protein", "= '\\t') for entry in UniProtIDs.index: constraint.append([(7.3*pow(10,-4)*float(UniProtIDs['Mass'][entry].replace(',',''))), 'P_%s_synthesis' %(UniProtIDs['Entry'][entry])]) model.addUserConstraint(pid = None, fluxes", "growthData['Lower Bound'].loc[growthData['Reaction ID']==i].values[0]) fbaResult = cbmpy.CBCPLEX.cplx_analyzeModel(model) fva = cbmpy.CBCPLEX.cplx_FluxVariabilityAnalysis(model, pre_opt=True) cbmpy.CBWrite.writeFVAdata(fva[0], fva[1], 'glcTitration_%s_%.2f.csv'", "sys.argv[3] proteomicsLoc = sys.argv[4] resultsFolder = sys.argv[5] model = cbmpy.CBRead.readSBML3FBC(modelLoc, scan_notes_gpr = False)", "= sys.argv[2] scriptLoc = sys.argv[3] proteomicsLoc = sys.argv[4] resultsFolder = sys.argv[5] model =", "multiplier for the concentrations of nutrients in the growth medium. We will use", "the supplemented MOPS variants) limitation simulations. \"\"\" multiplier = 1.0 #No changes in", "protSum=float(0.62/0.34) pID = 'UP000000625' constraint = [] UniProtIDs = pd.read_csv('proteinMasses.txt', sep = '\\t')", "np import os import sys import pandas as pd import re modelLoc =", "as pd import re modelLoc = sys.argv[1] growthMediumLoc = sys.argv[2] scriptLoc = sys.argv[3]", "cbmpy.CBRead.readSBML3FBC(modelLoc, scan_notes_gpr = False) growthData = pd.read_csv(growthMediumLoc) proteomicsData = pd.read_csv(proteomicsLoc) resultsPath = '%s/%s'", "changes in Glc abundance for i in growthData['Reaction ID']: model.setReactionLowerBound(i, multiplier * growthData['Lower", "for i in growthData['Reaction ID']: model.setReactionLowerBound(i, multiplier * growthData['Lower Bound'].loc[growthData['Reaction ID']==i].values[0]) fbaResult =", "the growth medium. We will use this to perform glucose (and amino acid,", "to perform glucose (and amino acid, for the supplemented MOPS variants) limitation simulations.", "import numpy as np import os import sys import pandas as pd import", "medium. We will use this to perform glucose (and amino acid, for the", "'UP000000625' constraint = [] UniProtIDs = pd.read_csv('proteinMasses.txt', sep = '\\t') for entry in", "concentrations of nutrients in the growth medium. We will use this to perform", "resultsFolder = sys.argv[5] model = cbmpy.CBRead.readSBML3FBC(modelLoc, scan_notes_gpr = False) growthData = pd.read_csv(growthMediumLoc) proteomicsData", "perform glucose (and amino acid, for the supplemented MOPS variants) limitation simulations. 
\"\"\"", "the constraint \"\"\" protSum=float(0.62/0.34) pID = 'UP000000625' constraint = [] UniProtIDs = pd.read_csv('proteinMasses.txt',", "= pd.read_csv(growthMediumLoc) proteomicsData = pd.read_csv(proteomicsLoc) resultsPath = '%s/%s' %(scriptLoc, resultsFolder) if not (os.path.isdir(resultsPath)):", "constraint \"\"\" protSum=float(0.62/0.34) pID = 'UP000000625' constraint = [] UniProtIDs = pd.read_csv('proteinMasses.txt', sep", "= '%s/%s' %(scriptLoc, resultsFolder) if not (os.path.isdir(resultsPath)): os.mkdir(resultsPath) os.chdir(resultsPath) \"\"\" Total protein volume", "= constraint, operator = '<=', rhs = protSum) os.chdir(resultsPath) \"\"\" Here, we define", "we define the multiplier for the concentrations of nutrients in the growth medium.", "multiplier = 1.0 #No changes in Glc abundance for i in growthData['Reaction ID']:", "pd.read_csv(growthMediumLoc) proteomicsData = pd.read_csv(proteomicsLoc) resultsPath = '%s/%s' %(scriptLoc, resultsFolder) if not (os.path.isdir(resultsPath)): os.mkdir(resultsPath)", "paper for the derivation of the constraint \"\"\" protSum=float(0.62/0.34) pID = 'UP000000625' constraint", "'P_%s_synthesis' %(UniProtIDs['Entry'][entry])]) model.addUserConstraint(pid = None, fluxes = constraint, operator = '<=', rhs =", "for entry in UniProtIDs.index: constraint.append([(7.3*pow(10,-4)*float(UniProtIDs['Mass'][entry].replace(',',''))), 'P_%s_synthesis' %(UniProtIDs['Entry'][entry])]) model.addUserConstraint(pid = None, fluxes = constraint,", "= pd.read_csv('proteinMasses.txt', sep = '\\t') for entry in UniProtIDs.index: constraint.append([(7.3*pow(10,-4)*float(UniProtIDs['Mass'][entry].replace(',',''))), 'P_%s_synthesis' %(UniProtIDs['Entry'][entry])]) model.addUserConstraint(pid", "of nutrients in the growth medium. We will use this to perform glucose", "of the paper for the derivation of the constraint \"\"\" protSum=float(0.62/0.34) pID =", "modelLoc = sys.argv[1] growthMediumLoc = sys.argv[2] scriptLoc = sys.argv[3] proteomicsLoc = sys.argv[4] resultsFolder", "pd import re modelLoc = sys.argv[1] growthMediumLoc = sys.argv[2] scriptLoc = sys.argv[3] proteomicsLoc", "for the supplemented MOPS variants) limitation simulations. \"\"\" multiplier = 1.0 #No changes", "%(UniProtIDs['Entry'][entry])]) model.addUserConstraint(pid = None, fluxes = constraint, operator = '<=', rhs = protSum)", "= '<=', rhs = protSum) os.chdir(resultsPath) \"\"\" Here, we define the multiplier for", "derivation of the constraint \"\"\" protSum=float(0.62/0.34) pID = 'UP000000625' constraint = [] UniProtIDs", "for the concentrations of nutrients in the growth medium. We will use this", "model.addUserConstraint(pid = None, fluxes = constraint, operator = '<=', rhs = protSum) os.chdir(resultsPath)", "None, fluxes = constraint, operator = '<=', rhs = protSum) os.chdir(resultsPath) \"\"\" Here,", "not (os.path.isdir(resultsPath)): os.mkdir(resultsPath) os.chdir(resultsPath) \"\"\" Total protein volume constraint for <NAME>i See the", "= None, fluxes = constraint, operator = '<=', rhs = protSum) os.chdir(resultsPath) \"\"\"", "os.chdir(resultsPath) \"\"\" Here, we define the multiplier for the concentrations of nutrients in" ]
[ "else: strg = 'python phot.py /science/fbianco/HSPdata/%s/unspooled/ %s %d %s'%(par['spool'],par['coordat'], par['last'], par['spool']) os.system(strg) ######################################################################################", "python runsextractor.py $i; done'%par['spool'] os.system(strg) if (cosmic == 0): strg='for i in /science/fbianco/HSPdata/%s/unspooled/*fits", "if par['unspool'].startswith('y'): print \"\\n\\n\\nUNSPOOLING\\n\\n\\n\" unspool(par, outpath) cosmic = 0 if par['cosmic'].startswith('y'): print \"\\n\\n\\nremoving", "'%s/%s/'%(SPEEDYOUT,nameroot) else : nameroot=par['spool'][0] if nameroot.endswith('.fits'): nameroot = nameroot[:-5] outpath = '%s/%s_all/'%(SPEEDYOUT,nameroot) tmp", "img[:-5] fname = '%s/%s/%s.fits'%(par['impath'],par['imdir'], img) if os.path.isfile(fname): ret = unspoolit(inpath, img+'.fits', inpath,par['dark'],'avg', dodarks[i],", "nameroot = nameroot[:-5] inpath = '%s/%s_all/unspooled/'%(SPEEDYOUT,nameroot) ret =myphot(inpath, par['coords'], int(par['last']), par['ap'], par['centroid'], nameroot,", "else: if (isinstance(par['spool'],types.StringTypes)): nameroot=par['spool'] if nameroot.endswith('.fits'): nameroot = nameroot[:-5] inpath = '%s/%s/unspooled/'%(SPEEDYOUT,nameroot) else:", "for i in range(1,len(par['spool'])): if dodarks[0] == 3 or dodarks[0] == 0: dodarks.append(dodarks[0])", "= nameroot[:-5] fname = '%s/%s/%s.fits'%(par['impath'],par['imdir'], nameroot) if os.path.isfile(fname): ret = unspoolit(inpath, nameroot+'.fits', inpath,par['dark'],'avg',", "################### cosmics and sextractor def runsex(par,cosmic): if (cosmic == 1): strg = 'python", "exiting!!!!!!!!!!!\\n\\n\\n\" sys.exit(0) else: print 'no spool %s to be found'%fname sys.exit(0) ################### aperture", "== 0): strg='for i in /science/fbianco/HSPdata/%s/unspooled/*fits ; do python runsextractor.py $i; done'%par['spool'] print", "f = open(configfile, 'r') config_string = f.read() parameters = eval(config_string) return parameters #unspool", "failed. exiting!!!!!!!!!!!\\n\\n\\n\" sys.exit(0) else: print 'no spool %s to be found'%fname sys.exit(0) else", "outpath) cosmic = 0 if par['cosmic'].startswith('y'): print \"\\n\\n\\nremoving cosmics...\\n\\n\\n\" cosmic = 1 if", "sys.path.append(\"../LIHSPcommon\") from myutils import mygetenv,readconfig, mymkdir, mjd speedyout=mygetenv('SPEEDYOUT') def readconfig(configfile): f = open(configfile,", "else: nameroot=par['spool'][0] if nameroot.endswith('.fits'): nameroot = nameroot[:-5] inpath = '%s/%s_all/unspooled/'%(SPEEDYOUT,nameroot) ret =myphot(inpath, par['coords'],", "(isinstance(par['spool'],types.StringTypes)): nameroot=par['spool'] if nameroot.endswith('.fits'): nameroot = nameroot[:-5] inpath = '%s/%s/unspooled/'%(SPEEDYOUT,nameroot) else: nameroot=par['spool'][0] if", "dodarks[0], heredir,outpath,0) if ret !=1: print \"\\n\\n\\n!!!!!!!!!!!!!!!PANIC: unspooling failed. 
exiting!!!!!!!!!!!\\n\\n\\n\" sys.exit(0) else: print", "if mymkdir(heredir)!= 0: sys.exit() if (isinstance(par['spool'],types.StringTypes)): nameroot=par['spool'] if nameroot.endswith('.fits'): nameroot = nameroot[:-5] fname", "print 'no spool %s to be found'%fname sys.exit(0) else : for i,img in", "if nameroot.endswith('.fits'): nameroot = nameroot[:-5] inpath = '%s/%s_all/unspooled/'%(SPEEDYOUT,nameroot) ret =myphot(inpath, par['coords'], int(par['last']), par['ap'],", "cosmics...\\n\\n\\n\" cosmic = 1 if par['sextract'].startswith('y'): print \"\\n\\n\\nextracting (sex)...\\n\\n\\n\" runsex(par, cosmic) if par['phot'].startswith('y'):", "= '%s/%s/unspooled/'%(SPEEDYOUT,nameroot) else: nameroot=par['spool'][0] if nameroot.endswith('.fits'): nameroot = nameroot[:-5] inpath = '%s/%s_all/unspooled/'%(SPEEDYOUT,nameroot) ret", "sys.exit() if (isinstance(par['spool'],types.StringTypes)): nameroot=par['spool'] if nameroot.endswith('.fits'): nameroot = nameroot[:-5] fname = '%s/%s/%s.fits'%(par['impath'],par['imdir'], nameroot)", "nameroot[:-5] outpath = '%s/%s_all/'%(SPEEDYOUT,nameroot) tmp = '%s/%s/%s.fits'%(par['impath'],par['imdir'], nameroot) image=PF.open(tmp) header=image[0].header print \"last image", "in range(1,len(par['spool'])): if dodarks[0] == 3 or dodarks[0] == 0: dodarks.append(dodarks[0]) else: dodarks.append(2)", "print inpath print par['spool'], dodarks heredir=speedyout+'/darks/' if mymkdir(heredir)!= 0: sys.exit() if (isinstance(par['spool'],types.StringTypes)): nameroot=par['spool']", "nameroot) if os.path.isfile(fname): ret = unspoolit(inpath, nameroot+'.fits', inpath,par['dark'],'avg', dodarks[0], heredir,outpath,0) if ret !=1:", "1 if par['sextract'].startswith('y'): print \"\\n\\n\\nextracting (sex)...\\n\\n\\n\" runsex(par, cosmic) if par['phot'].startswith('y'): print \"\\n\\n\\nrunning iraf", "do python runsextractor.py $i; done'%par['spool'] os.system(strg) if (cosmic == 0): strg='for i in", "par['last'] >image[0].header['NAXIS3']: par['last']=int(image[0].header['NAXIS3']) print \"last image in spool: \",par['last'] if par['unspool'].startswith('y'): print \"\\n\\n\\nUNSPOOLING\\n\\n\\n\"", "3 or dodarks[0] == 0: dodarks.append(dodarks[0]) else: dodarks.append(2) inpath=par['impath']+'/'+par['imdir']+'/' print inpath print par['spool'],", "%s'%(par['spool'],par['coordat'], par['last'], par['spool']) os.system(strg) ###################################################################################### from myutils import mymkdir,mygetenv import types par =", "photometry def myapphot(par, cosmic): from myutils import mymkdir,mygetenv from myapphot import * SPEEDYOUT=mygetenv('SPEEDYOUT')", "import * SPEEDYOUT=mygetenv('SPEEDYOUT') if (cosmic): if (isinstance(par['spool'],types.StringTypes)): nameroot=par['spool'] if nameroot.endswith('.fits'): nameroot = nameroot[:-5]", "if nameroot.endswith('.fits'): nameroot = nameroot[:-5] outpath = '%s/%s_all/'%(SPEEDYOUT,nameroot) tmp = '%s/%s/%s.fits'%(par['impath'],par['imdir'], nameroot) image=PF.open(tmp)", "image in spool: \",par['last'] if par['last'] == 0 or par['last'] >image[0].header['NAXIS3']: par['last']=int(image[0].header['NAXIS3']) print", "spool: \",par['last'] if par['unspool'].startswith('y'): print \"\\n\\n\\nUNSPOOLING\\n\\n\\n\" unspool(par, outpath) cosmic = 0 if par['cosmic'].startswith('y'):", "'%s/%s//clean/'%(SPEEDYOUT,nameroot) else: nameroot=par['spool'][0] if nameroot.endswith('.fits'): nameroot = nameroot[:-5] inpath = '%s/%s_all/clean/'%(SPEEDYOUT,nameroot) else: if", "'r') config_string = 
f.read() parameters = eval(config_string) return parameters #unspool def unspool(par,outpath): from", "if (isinstance(par['spool'],types.StringTypes)): nameroot=par['spool'] if nameroot.endswith('.fits'): nameroot = nameroot[:-5] fname = '%s/%s/%s.fits'%(par['impath'],par['imdir'], nameroot) if", "image in spool: \",par['last'] if par['unspool'].startswith('y'): print \"\\n\\n\\nUNSPOOLING\\n\\n\\n\" unspool(par, outpath) cosmic = 0", "img = img[:-5] fname = '%s/%s/%s.fits'%(par['impath'],par['imdir'], img) if os.path.isfile(fname): ret = unspoolit(inpath, img+'.fits',", "if mymkdir(outpath)!= 0: sys.exit() if mymkdir(outpath+'/unspooled')!= 0: sys.exit() # strg = 'mkdir %s'%outpath", "import mymkdir,mygetenv from unspool import unspoolit if mymkdir(outpath)!= 0: sys.exit() if mymkdir(outpath+'/unspooled')!= 0:", "== 0 or par['last'] >image[0].header['NAXIS3']: par['last']=int(image[0].header['NAXIS3']) print \"last image in spool: \",par['last'] if", "import sys,os,time import pyfits as PF sys.path.append(\"../LIHSPcommon\") from myutils import mygetenv,readconfig, mymkdir, mjd", "open(configfile, 'r') config_string = f.read() parameters = eval(config_string) return parameters #unspool def unspool(par,outpath):", "$i; done'%par['spool'] print strg os.system(strg) #photometry def photometry(par, cosmic): if (cosmic): strg =", "failed. exiting!!!!!!!!!!!\\n\\n\\n\" sys.exit(0) else: print 'no spool %s to be found'%fname sys.exit(0) ###################", "import unspoolit if mymkdir(outpath)!= 0: sys.exit() if mymkdir(outpath+'/unspooled')!= 0: sys.exit() # strg =", "%d %s'%(par['spool'],par['coordat'], par['last'], par['spool']) os.system(strg) else: strg = 'python phot.py /science/fbianco/HSPdata/%s/unspooled/ %s %d", "%s %d %s'%(par['spool'],par['coordat'], par['last'], par['spool']) os.system(strg) else: strg = 'python phot.py /science/fbianco/HSPdata/%s/unspooled/ %s", "0 if par['cosmic'].startswith('y'): print \"\\n\\n\\nremoving cosmics...\\n\\n\\n\" cosmic = 1 if par['sextract'].startswith('y'): print \"\\n\\n\\nextracting", "= open(configfile, 'r') config_string = f.read() parameters = eval(config_string) return parameters #unspool def", "0: sys.exit() if (isinstance(par['spool'],types.StringTypes)): nameroot=par['spool'] if nameroot.endswith('.fits'): nameroot = nameroot[:-5] fname = '%s/%s/%s.fits'%(par['impath'],par['imdir'],", "speedyout=mygetenv('SPEEDYOUT') def readconfig(configfile): f = open(configfile, 'r') config_string = f.read() parameters = eval(config_string)", "unspool import unspoolit if mymkdir(outpath)!= 0: sys.exit() if mymkdir(outpath+'/unspooled')!= 0: sys.exit() # strg", "myutils import mymkdir,mygetenv from myapphot import * SPEEDYOUT=mygetenv('SPEEDYOUT') if (cosmic): if (isinstance(par['spool'],types.StringTypes)): nameroot=par['spool']", "nameroot.endswith('.fits'): nameroot = nameroot[:-5] inpath = '%s/%s_all/unspooled/'%(SPEEDYOUT,nameroot) ret =myphot(inpath, par['coords'], int(par['last']), par['ap'], par['centroid'],", "found'%fname sys.exit(0) else : for i,img in enumerate(par['spool']): if img.endswith('.fits'): img = img[:-5]", "unspoolit(inpath, nameroot+'.fits', inpath,par['dark'],'avg', dodarks[0], heredir,outpath,0) if ret !=1: print \"\\n\\n\\n!!!!!!!!!!!!!!!PANIC: unspooling failed. 
exiting!!!!!!!!!!!\\n\\n\\n\"", "par['spool']) os.system(strg) else: strg = 'python phot.py /science/fbianco/HSPdata/%s/unspooled/ %s %d %s'%(par['spool'],par['coordat'], par['last'], par['spool'])", "par['ap'], par['centroid'], nameroot, par['target']) if ret !=1: print \"\\n\\n\\n!!!!!!!!!!!!!!!PANIC: photometry failed. exiting!!!!!!!!!!!\\n\\n\\n\" sys.exit(0)", "= '%s/%s//clean/'%(SPEEDYOUT,nameroot) else: nameroot=par['spool'][0] if nameroot.endswith('.fits'): nameroot = nameroot[:-5] inpath = '%s/%s_all/clean/'%(SPEEDYOUT,nameroot) else:", "if par['cosmic'].startswith('y'): print \"\\n\\n\\nremoving cosmics...\\n\\n\\n\" cosmic = 1 if par['sextract'].startswith('y'): print \"\\n\\n\\nextracting (sex)...\\n\\n\\n\"", "sys.exit() dodarks = [par['dodark']] if (isinstance(par['spool'],types.StringTypes)): print \"only 1 file to unspool\" else:", "# strg = 'mkdir %s'%outpath # os.system(strg) heredir=mygetenv('SPEEDYOUT')+'/darks/' if mymkdir(heredir)!= 0: sys.exit() dodarks", "%s to be found'%fname sys.exit(0) ################### aperture photometry def myapphot(par, cosmic): from myutils", "== 3 or dodarks[0] == 0: dodarks.append(dodarks[0]) else: dodarks.append(2) inpath=par['impath']+'/'+par['imdir']+'/' print inpath print", "nameroot=par['spool'][0] if nameroot.endswith('.fits'): nameroot = nameroot[:-5] outpath = '%s/%s_all/'%(SPEEDYOUT,nameroot) tmp = '%s/%s/%s.fits'%(par['impath'],par['imdir'], nameroot)", "%d'%(par['spool'],par['spool'],par['last']) os.system(strg) strg='for i in /science/fbianco/HSPdata/%s/clean/unspooled/*fits ; do python runsextractor.py $i; done'%par['spool'] os.system(strg)", "outpath = '%s/%s_all/'%(SPEEDYOUT,nameroot) tmp = '%s/%s/%s.fits'%(par['impath'],par['imdir'], nameroot) image=PF.open(tmp) header=image[0].header print \"last image in", "def runsex(par,cosmic): if (cosmic == 1): strg = 'python runcosmic.py /science/fbianco/HSPdata/unspooled/%s %s_ 0", "%s %s %d %d'%(nameroot,par['coordat'],nameroot,par['last'],par['ap']) os.system(strg) else: strg = 'python extractlc.py /science/fbianco/HSPdata/%s/unspooled %s %s", "nameroot[:-5] inpath = '%s/%s_all/clean/'%(SPEEDYOUT,nameroot) else: if (isinstance(par['spool'],types.StringTypes)): nameroot=par['spool'] if nameroot.endswith('.fits'): nameroot = nameroot[:-5]", "if (isinstance(par['spool'],types.StringTypes)): nameroot=par['spool'] if nameroot.endswith('.fits'): nameroot = nameroot[:-5] outpath = '%s/%s/'%(SPEEDYOUT,nameroot) else :", "if (cosmic == 1): strg = 'python runcosmic.py /science/fbianco/HSPdata/unspooled/%s %s_ 0 %d'%(par['spool'],par['spool'],par['last']) os.system(strg)", "os.system(strg) if (cosmic == 0): strg='for i in /science/fbianco/HSPdata/%s/unspooled/*fits ; do python runsextractor.py", "'%s/%s_all/'%(SPEEDYOUT,nameroot) tmp = '%s/%s/%s.fits'%(par['impath'],par['imdir'], nameroot) image=PF.open(tmp) header=image[0].header print \"last image in spool: \",par['last']", "and sextractor def runsex(par,cosmic): if (cosmic == 1): strg = 'python runcosmic.py /science/fbianco/HSPdata/unspooled/%s", "import mygetenv,readconfig, mymkdir, mjd speedyout=mygetenv('SPEEDYOUT') def readconfig(configfile): f = open(configfile, 'r') config_string =", "else: dodarks.append(2) inpath=par['impath']+'/'+par['imdir']+'/' print inpath print par['spool'], dodarks heredir=speedyout+'/darks/' if mymkdir(heredir)!= 0: sys.exit()", "nameroot = nameroot[:-5] inpath = '%s/%s_all/clean/'%(SPEEDYOUT,nameroot) else: if (isinstance(par['spool'],types.StringTypes)): nameroot=par['spool'] if 
nameroot.endswith('.fits'): nameroot", "dodarks = [par['dodark']] if (isinstance(par['spool'],types.StringTypes)): print \"only 1 file to unspool\" else: for", "par['cosmic'].startswith('y'): print \"\\n\\n\\nremoving cosmics...\\n\\n\\n\" cosmic = 1 if par['sextract'].startswith('y'): print \"\\n\\n\\nextracting (sex)...\\n\\n\\n\" runsex(par,", "heredir=mygetenv('SPEEDYOUT')+'/darks/' if mymkdir(heredir)!= 0: sys.exit() dodarks = [par['dodark']] if (isinstance(par['spool'],types.StringTypes)): print \"only 1", "nameroot[:-5] outpath = '%s/%s/'%(SPEEDYOUT,nameroot) else : nameroot=par['spool'][0] if nameroot.endswith('.fits'): nameroot = nameroot[:-5] outpath", "outpath = '%s/%s/'%(SPEEDYOUT,nameroot) else : nameroot=par['spool'][0] if nameroot.endswith('.fits'): nameroot = nameroot[:-5] outpath =", "be found'%fname sys.exit(0) ################### aperture photometry def myapphot(par, cosmic): from myutils import mymkdir,mygetenv", "fname = '%s/%s/%s.fits'%(par['impath'],par['imdir'], nameroot) if os.path.isfile(fname): ret = unspoolit(inpath, nameroot+'.fits', inpath,par['dark'],'avg', dodarks[0], heredir,outpath,0)", "%s to be found'%fname sys.exit(0) else : for i,img in enumerate(par['spool']): if img.endswith('.fits'):", "'no spool %s to be found'%fname sys.exit(0) else : for i,img in enumerate(par['spool']):", "/science/fbianco/HSPdata/%s/unspooled/ %s %d %s'%(par['spool'],par['coordat'], par['last'], par['spool']) os.system(strg) ###################################################################################### from myutils import mymkdir,mygetenv import", "= readconfig(sys.argv[1]) print par SPEEDYOUT=mygetenv('SPEEDYOUT') if (isinstance(par['spool'],types.StringTypes)): nameroot=par['spool'] if nameroot.endswith('.fits'): nameroot = nameroot[:-5]", "unspool(par,outpath): from myutils import mymkdir,mygetenv from unspool import unspoolit if mymkdir(outpath)!= 0: sys.exit()", "def unspool(par,outpath): from myutils import mymkdir,mygetenv from unspool import unspoolit if mymkdir(outpath)!= 0:", "strg='for i in /science/fbianco/HSPdata/%s/clean/unspooled/*fits ; do python runsextractor.py $i; done'%par['spool'] os.system(strg) if (cosmic", "par = readconfig(sys.argv[1]) print par SPEEDYOUT=mygetenv('SPEEDYOUT') if (isinstance(par['spool'],types.StringTypes)): nameroot=par['spool'] if nameroot.endswith('.fits'): nameroot =", "img.endswith('.fits'): img = img[:-5] fname = '%s/%s/%s.fits'%(par['impath'],par['imdir'], img) if os.path.isfile(fname): ret = unspoolit(inpath,", "strg = 'python extractlc.py /science/fbianco/HSPdata/%s/unspooled %s %s %d %d'%(nameroot,par['coordat'],nameroot,par['last'],par['ap']) os.system(strg) if par['myapphot'].startswith('y'): print", "print \"\\n\\n\\n!!!!!!!!!!!!!!!PANIC: unspooling failed. 
exiting!!!!!!!!!!!\\n\\n\\n\" sys.exit(0) else: print 'no spool %s to be", "= 0 if par['cosmic'].startswith('y'): print \"\\n\\n\\nremoving cosmics...\\n\\n\\n\" cosmic = 1 if par['sextract'].startswith('y'): print", "else: strg = 'python extractlc.py /science/fbianco/HSPdata/%s/unspooled %s %s %d %d'%(nameroot,par['coordat'],nameroot,par['last'],par['ap']) os.system(strg) if par['myapphot'].startswith('y'):", "'python extractlc.py /science/fbianco/HSPdata/%s/unspooled/clean/ %s %s %d %d'%(nameroot,par['coordat'],nameroot,par['last'],par['ap']) os.system(strg) else: strg = 'python extractlc.py", "import mymkdir,mygetenv from myapphot import * SPEEDYOUT=mygetenv('SPEEDYOUT') if (cosmic): if (isinstance(par['spool'],types.StringTypes)): nameroot=par['spool'] if", "if dodarks[0] == 3 or dodarks[0] == 0: dodarks.append(dodarks[0]) else: dodarks.append(2) inpath=par['impath']+'/'+par['imdir']+'/' print", "exiting!!!!!!!!!!!\\n\\n\\n\" sys.exit(0) else: print 'no spool %s to be found'%fname sys.exit(0) else :", "myapphot import * SPEEDYOUT=mygetenv('SPEEDYOUT') if (cosmic): if (isinstance(par['spool'],types.StringTypes)): nameroot=par['spool'] if nameroot.endswith('.fits'): nameroot =", "%s'%(par['spool'],par['coordat'], par['last'], par['spool']) os.system(strg) else: strg = 'python phot.py /science/fbianco/HSPdata/%s/unspooled/ %s %d %s'%(par['spool'],par['coordat'],", "myutils import mymkdir,mygetenv import types par = readconfig(sys.argv[1]) print par SPEEDYOUT=mygetenv('SPEEDYOUT') if (isinstance(par['spool'],types.StringTypes)):", "'%s/%s_all/unspooled/'%(SPEEDYOUT,nameroot) ret =myphot(inpath, par['coords'], int(par['last']), par['ap'], par['centroid'], nameroot, par['target']) if ret !=1: print", "spool %s to be found'%fname sys.exit(0) else : for i,img in enumerate(par['spool']): if", "dodarks.append(2) inpath=par['impath']+'/'+par['imdir']+'/' print inpath print par['spool'], dodarks heredir=speedyout+'/darks/' if mymkdir(heredir)!= 0: sys.exit() if", "%s %d %s'%(par['spool'],par['coordat'], par['last'], par['spool']) os.system(strg) ###################################################################################### from myutils import mymkdir,mygetenv import types", "runsextractor.py $i; done'%par['spool'] print strg os.system(strg) #photometry def photometry(par, cosmic): if (cosmic): strg", "par['spool']) os.system(strg) ###################################################################################### from myutils import mymkdir,mygetenv import types par = readconfig(sys.argv[1]) print", "heredir=speedyout+'/darks/' if mymkdir(heredir)!= 0: sys.exit() if (isinstance(par['spool'],types.StringTypes)): nameroot=par['spool'] if nameroot.endswith('.fits'): nameroot = nameroot[:-5]", "nameroot=par['spool'][0] if nameroot.endswith('.fits'): nameroot = nameroot[:-5] inpath = '%s/%s_all/clean/'%(SPEEDYOUT,nameroot) else: if (isinstance(par['spool'],types.StringTypes)): nameroot=par['spool']", "sys.exit() if mymkdir(outpath+'/unspooled')!= 0: sys.exit() # strg = 'mkdir %s'%outpath # os.system(strg) heredir=mygetenv('SPEEDYOUT')+'/darks/'", "0: dodarks.append(dodarks[0]) else: dodarks.append(2) inpath=par['impath']+'/'+par['imdir']+'/' print inpath print par['spool'], dodarks heredir=speedyout+'/darks/' if mymkdir(heredir)!=", "done'%par['spool'] print strg os.system(strg) #photometry def photometry(par, cosmic): if (cosmic): strg = 'python", "inpath,par['dark'],'avg', dodarks[i], heredir,outpath) if ret !=1: print \"\\n\\n\\n!!!!!!!!!!!!!!!PANIC: unspooling failed. 
exiting!!!!!!!!!!!\\n\\n\\n\" sys.exit(0) else:", "1 file to unspool\" else: for i in range(1,len(par['spool'])): if dodarks[0] == 3", "os.system(strg) else: strg = 'python phot.py /science/fbianco/HSPdata/%s/unspooled/ %s %d %s'%(par['spool'],par['coordat'], par['last'], par['spool']) os.system(strg)", "mymkdir,mygetenv from myapphot import * SPEEDYOUT=mygetenv('SPEEDYOUT') if (cosmic): if (isinstance(par['spool'],types.StringTypes)): nameroot=par['spool'] if nameroot.endswith('.fits'):", "(isinstance(par['spool'],types.StringTypes)): print \"only 1 file to unspool\" else: for i in range(1,len(par['spool'])): if", "ret !=1: print \"\\n\\n\\n!!!!!!!!!!!!!!!PANIC: photometry failed. exiting!!!!!!!!!!!\\n\\n\\n\" sys.exit(0) ###################################################################################### ################### cosmics and sextractor", "= '%s/%s_all/unspooled/'%(SPEEDYOUT,nameroot) ret =myphot(inpath, par['coords'], int(par['last']), par['ap'], par['centroid'], nameroot, par['target']) if ret !=1:", "nameroot.endswith('.fits'): nameroot = nameroot[:-5] outpath = '%s/%s_all/'%(SPEEDYOUT,nameroot) tmp = '%s/%s/%s.fits'%(par['impath'],par['imdir'], nameroot) image=PF.open(tmp) header=image[0].header", ": for i,img in enumerate(par['spool']): if img.endswith('.fits'): img = img[:-5] fname = '%s/%s/%s.fits'%(par['impath'],par['imdir'],", "nameroot[:-5] inpath = '%s/%s//clean/'%(SPEEDYOUT,nameroot) else: nameroot=par['spool'][0] if nameroot.endswith('.fits'): nameroot = nameroot[:-5] inpath =", "myutils import mygetenv,readconfig, mymkdir, mjd speedyout=mygetenv('SPEEDYOUT') def readconfig(configfile): f = open(configfile, 'r') config_string", "nameroot = nameroot[:-5] outpath = '%s/%s/'%(SPEEDYOUT,nameroot) else : nameroot=par['spool'][0] if nameroot.endswith('.fits'): nameroot =", "extractlc.py /science/fbianco/HSPdata/%s/unspooled/clean/ %s %s %d %d'%(nameroot,par['coordat'],nameroot,par['last'],par['ap']) os.system(strg) else: strg = 'python extractlc.py /science/fbianco/HSPdata/%s/unspooled", "nameroot=par['spool'] if nameroot.endswith('.fits'): nameroot = nameroot[:-5] outpath = '%s/%s/'%(SPEEDYOUT,nameroot) else : nameroot=par['spool'][0] if", "= 'python runcosmic.py /science/fbianco/HSPdata/unspooled/%s %s_ 0 %d'%(par['spool'],par['spool'],par['last']) os.system(strg) strg='for i in /science/fbianco/HSPdata/%s/clean/unspooled/*fits ;", "heredir,outpath) if ret !=1: print \"\\n\\n\\n!!!!!!!!!!!!!!!PANIC: unspooling failed. 
exiting!!!!!!!!!!!\\n\\n\\n\" sys.exit(0) else: print 'no", "= nameroot[:-5] outpath = '%s/%s_all/'%(SPEEDYOUT,nameroot) tmp = '%s/%s/%s.fits'%(par['impath'],par['imdir'], nameroot) image=PF.open(tmp) header=image[0].header print \"last", "cosmics and sextractor def runsex(par,cosmic): if (cosmic == 1): strg = 'python runcosmic.py", "\"\\n\\n\\nrunning iraf photometry...\\n\\n\\n\" photometry(par, cosmic) if par['createlc'].startswith('y'): print \"\\n\\n\\ncreating lcvs...\\n\\n\\n\" if(cosmic): strg =", "par['phot'].startswith('y'): print \"\\n\\n\\nrunning iraf photometry...\\n\\n\\n\" photometry(par, cosmic) if par['createlc'].startswith('y'): print \"\\n\\n\\ncreating lcvs...\\n\\n\\n\" if(cosmic):", "nameroot[:-5] fname = '%s/%s/%s.fits'%(par['impath'],par['imdir'], nameroot) if os.path.isfile(fname): ret = unspoolit(inpath, nameroot+'.fits', inpath,par['dark'],'avg', dodarks[0],", "$i; done'%par['spool'] os.system(strg) if (cosmic == 0): strg='for i in /science/fbianco/HSPdata/%s/unspooled/*fits ; do", "import pyfits as PF sys.path.append(\"../LIHSPcommon\") from myutils import mygetenv,readconfig, mymkdir, mjd speedyout=mygetenv('SPEEDYOUT') def", "cosmic): from myutils import mymkdir,mygetenv from myapphot import * SPEEDYOUT=mygetenv('SPEEDYOUT') if (cosmic): if", "enumerate(par['spool']): if img.endswith('.fits'): img = img[:-5] fname = '%s/%s/%s.fits'%(par['impath'],par['imdir'], img) if os.path.isfile(fname): ret", "unspoolit if mymkdir(outpath)!= 0: sys.exit() if mymkdir(outpath+'/unspooled')!= 0: sys.exit() # strg = 'mkdir", "if nameroot.endswith('.fits'): nameroot = nameroot[:-5] inpath = '%s/%s//clean/'%(SPEEDYOUT,nameroot) else: nameroot=par['spool'][0] if nameroot.endswith('.fits'): nameroot", "= eval(config_string) return parameters #unspool def unspool(par,outpath): from myutils import mymkdir,mygetenv from unspool", "pyfits as PF sys.path.append(\"../LIHSPcommon\") from myutils import mygetenv,readconfig, mymkdir, mjd speedyout=mygetenv('SPEEDYOUT') def readconfig(configfile):", "; do python runsextractor.py $i; done'%par['spool'] print strg os.system(strg) #photometry def photometry(par, cosmic):", "(cosmic): if (isinstance(par['spool'],types.StringTypes)): nameroot=par['spool'] if nameroot.endswith('.fits'): nameroot = nameroot[:-5] inpath = '%s/%s//clean/'%(SPEEDYOUT,nameroot) else:", "or dodarks[0] == 0: dodarks.append(dodarks[0]) else: dodarks.append(2) inpath=par['impath']+'/'+par['imdir']+'/' print inpath print par['spool'], dodarks", "= unspoolit(inpath, img+'.fits', inpath,par['dark'],'avg', dodarks[i], heredir,outpath) if ret !=1: print \"\\n\\n\\n!!!!!!!!!!!!!!!PANIC: unspooling failed.", "i in /science/fbianco/HSPdata/%s/clean/unspooled/*fits ; do python runsextractor.py $i; done'%par['spool'] os.system(strg) if (cosmic ==", "if mymkdir(outpath+'/unspooled')!= 0: sys.exit() # strg = 'mkdir %s'%outpath # os.system(strg) heredir=mygetenv('SPEEDYOUT')+'/darks/' if", "print 'no spool %s to be found'%fname sys.exit(0) ################### aperture photometry def myapphot(par,", "or par['last'] >image[0].header['NAXIS3']: par['last']=int(image[0].header['NAXIS3']) print \"last image in spool: \",par['last'] if par['unspool'].startswith('y'): print", "if img.endswith('.fits'): img = img[:-5] fname = '%s/%s/%s.fits'%(par['impath'],par['imdir'], img) if os.path.isfile(fname): ret =", "if os.path.isfile(fname): ret = unspoolit(inpath, img+'.fits', inpath,par['dark'],'avg', dodarks[i], heredir,outpath) if ret !=1: print", "'python extractlc.py 
/science/fbianco/HSPdata/%s/unspooled %s %s %d %d'%(nameroot,par['coordat'],nameroot,par['last'],par['ap']) os.system(strg) if par['myapphot'].startswith('y'): print \"\\n\\n\\nrunning my", "= 'python phot.py /science/fbianco/HSPdata/%s/unspooled/ %s %d %s'%(par['spool'],par['coordat'], par['last'], par['spool']) os.system(strg) ###################################################################################### from myutils", "\"\\n\\n\\nUNSPOOLING\\n\\n\\n\" unspool(par, outpath) cosmic = 0 if par['cosmic'].startswith('y'): print \"\\n\\n\\nremoving cosmics...\\n\\n\\n\" cosmic =", "ret = unspoolit(inpath, img+'.fits', inpath,par['dark'],'avg', dodarks[i], heredir,outpath) if ret !=1: print \"\\n\\n\\n!!!!!!!!!!!!!!!PANIC: unspooling", "if nameroot.endswith('.fits'): nameroot = nameroot[:-5] inpath = '%s/%s_all/clean/'%(SPEEDYOUT,nameroot) else: if (isinstance(par['spool'],types.StringTypes)): nameroot=par['spool'] if", "= f.read() parameters = eval(config_string) return parameters #unspool def unspool(par,outpath): from myutils import", "nameroot=par['spool'] if nameroot.endswith('.fits'): nameroot = nameroot[:-5] inpath = '%s/%s/unspooled/'%(SPEEDYOUT,nameroot) else: nameroot=par['spool'][0] if nameroot.endswith('.fits'):", "if(cosmic): strg = 'python extractlc.py /science/fbianco/HSPdata/%s/unspooled/clean/ %s %s %d %d'%(nameroot,par['coordat'],nameroot,par['last'],par['ap']) os.system(strg) else: strg", "from myutils import mymkdir,mygetenv from myapphot import * SPEEDYOUT=mygetenv('SPEEDYOUT') if (cosmic): if (isinstance(par['spool'],types.StringTypes)):", "nameroot.endswith('.fits'): nameroot = nameroot[:-5] inpath = '%s/%s//clean/'%(SPEEDYOUT,nameroot) else: nameroot=par['spool'][0] if nameroot.endswith('.fits'): nameroot =", "= 'mkdir %s'%outpath # os.system(strg) heredir=mygetenv('SPEEDYOUT')+'/darks/' if mymkdir(heredir)!= 0: sys.exit() dodarks = [par['dodark']]", "if ret !=1: print \"\\n\\n\\n!!!!!!!!!!!!!!!PANIC: unspooling failed. 
exiting!!!!!!!!!!!\\n\\n\\n\" sys.exit(0) else: print 'no spool", "= '%s/%s_all/'%(SPEEDYOUT,nameroot) tmp = '%s/%s/%s.fits'%(par['impath'],par['imdir'], nameroot) image=PF.open(tmp) header=image[0].header print \"last image in spool:", "nameroot = nameroot[:-5] outpath = '%s/%s_all/'%(SPEEDYOUT,nameroot) tmp = '%s/%s/%s.fits'%(par['impath'],par['imdir'], nameroot) image=PF.open(tmp) header=image[0].header print", "runsex(par, cosmic) if par['phot'].startswith('y'): print \"\\n\\n\\nrunning iraf photometry...\\n\\n\\n\" photometry(par, cosmic) if par['createlc'].startswith('y'): print", "0): strg='for i in /science/fbianco/HSPdata/%s/unspooled/*fits ; do python runsextractor.py $i; done'%par['spool'] print strg", "inpath = '%s/%s_all/clean/'%(SPEEDYOUT,nameroot) else: if (isinstance(par['spool'],types.StringTypes)): nameroot=par['spool'] if nameroot.endswith('.fits'): nameroot = nameroot[:-5] inpath", "print \"\\n\\n\\nUNSPOOLING\\n\\n\\n\" unspool(par, outpath) cosmic = 0 if par['cosmic'].startswith('y'): print \"\\n\\n\\nremoving cosmics...\\n\\n\\n\" cosmic", "if nameroot.endswith('.fits'): nameroot = nameroot[:-5] inpath = '%s/%s/unspooled/'%(SPEEDYOUT,nameroot) else: nameroot=par['spool'][0] if nameroot.endswith('.fits'): nameroot", "\",par['last'] if par['unspool'].startswith('y'): print \"\\n\\n\\nUNSPOOLING\\n\\n\\n\" unspool(par, outpath) cosmic = 0 if par['cosmic'].startswith('y'): print", "0: sys.exit() # strg = 'mkdir %s'%outpath # os.system(strg) heredir=mygetenv('SPEEDYOUT')+'/darks/' if mymkdir(heredir)!= 0:", "for i,img in enumerate(par['spool']): if img.endswith('.fits'): img = img[:-5] fname = '%s/%s/%s.fits'%(par['impath'],par['imdir'], img)", "unspool(par, outpath) cosmic = 0 if par['cosmic'].startswith('y'): print \"\\n\\n\\nremoving cosmics...\\n\\n\\n\" cosmic = 1", "from myutils import mymkdir,mygetenv from unspool import unspoolit if mymkdir(outpath)!= 0: sys.exit() if", "f.read() parameters = eval(config_string) return parameters #unspool def unspool(par,outpath): from myutils import mymkdir,mygetenv", "sextractor def runsex(par,cosmic): if (cosmic == 1): strg = 'python runcosmic.py /science/fbianco/HSPdata/unspooled/%s %s_", "0 %d'%(par['spool'],par['spool'],par['last']) os.system(strg) strg='for i in /science/fbianco/HSPdata/%s/clean/unspooled/*fits ; do python runsextractor.py $i; done'%par['spool']", "range(1,len(par['spool'])): if dodarks[0] == 3 or dodarks[0] == 0: dodarks.append(dodarks[0]) else: dodarks.append(2) inpath=par['impath']+'/'+par['imdir']+'/'", "inpath=par['impath']+'/'+par['imdir']+'/' print inpath print par['spool'], dodarks heredir=speedyout+'/darks/' if mymkdir(heredir)!= 0: sys.exit() if (isinstance(par['spool'],types.StringTypes)):", "nameroot, par['target']) if ret !=1: print \"\\n\\n\\n!!!!!!!!!!!!!!!PANIC: photometry failed. 
exiting!!!!!!!!!!!\\n\\n\\n\" sys.exit(0) ###################################################################################### ###################", "0 or par['last'] >image[0].header['NAXIS3']: par['last']=int(image[0].header['NAXIS3']) print \"last image in spool: \",par['last'] if par['unspool'].startswith('y'):", "done'%par['spool'] os.system(strg) if (cosmic == 0): strg='for i in /science/fbianco/HSPdata/%s/unspooled/*fits ; do python", "%d'%(nameroot,par['coordat'],nameroot,par['last'],par['ap']) os.system(strg) else: strg = 'python extractlc.py /science/fbianco/HSPdata/%s/unspooled %s %s %d %d'%(nameroot,par['coordat'],nameroot,par['last'],par['ap']) os.system(strg)", "print par['spool'], dodarks heredir=speedyout+'/darks/' if mymkdir(heredir)!= 0: sys.exit() if (isinstance(par['spool'],types.StringTypes)): nameroot=par['spool'] if nameroot.endswith('.fits'):", "if (isinstance(par['spool'],types.StringTypes)): nameroot=par['spool'] if nameroot.endswith('.fits'): nameroot = nameroot[:-5] inpath = '%s/%s/unspooled/'%(SPEEDYOUT,nameroot) else: nameroot=par['spool'][0]", "cosmic): if (cosmic): strg = 'python phot.py /science/fbianco/HSPdata/%s/unspooled/clean/ %s %d %s'%(par['spool'],par['coordat'], par['last'], par['spool'])", "= '%s/%s/%s.fits'%(par['impath'],par['imdir'], nameroot) image=PF.open(tmp) header=image[0].header print \"last image in spool: \",par['last'] if par['last']", "to be found'%fname sys.exit(0) else : for i,img in enumerate(par['spool']): if img.endswith('.fits'): img", "cosmic) if par['phot'].startswith('y'): print \"\\n\\n\\nrunning iraf photometry...\\n\\n\\n\" photometry(par, cosmic) if par['createlc'].startswith('y'): print \"\\n\\n\\ncreating", "/science/fbianco/HSPdata/%s/clean/unspooled/*fits ; do python runsextractor.py $i; done'%par['spool'] os.system(strg) if (cosmic == 0): strg='for", "if ret !=1: print \"\\n\\n\\n!!!!!!!!!!!!!!!PANIC: photometry failed. 
exiting!!!!!!!!!!!\\n\\n\\n\" sys.exit(0) ###################################################################################### ################### cosmics and", "python runsextractor.py $i; done'%par['spool'] print strg os.system(strg) #photometry def photometry(par, cosmic): if (cosmic):", "aperture photometry def myapphot(par, cosmic): from myutils import mymkdir,mygetenv from myapphot import *", "extractlc.py /science/fbianco/HSPdata/%s/unspooled %s %s %d %d'%(nameroot,par['coordat'],nameroot,par['last'],par['ap']) os.system(strg) if par['myapphot'].startswith('y'): print \"\\n\\n\\nrunning my aperture", "par['last'], par['spool']) os.system(strg) else: strg = 'python phot.py /science/fbianco/HSPdata/%s/unspooled/ %s %d %s'%(par['spool'],par['coordat'], par['last'],", "1): strg = 'python runcosmic.py /science/fbianco/HSPdata/unspooled/%s %s_ 0 %d'%(par['spool'],par['spool'],par['last']) os.system(strg) strg='for i in", "iraf photometry...\\n\\n\\n\" photometry(par, cosmic) if par['createlc'].startswith('y'): print \"\\n\\n\\ncreating lcvs...\\n\\n\\n\" if(cosmic): strg = 'python", "= '%s/%s/%s.fits'%(par['impath'],par['imdir'], nameroot) if os.path.isfile(fname): ret = unspoolit(inpath, nameroot+'.fits', inpath,par['dark'],'avg', dodarks[0], heredir,outpath,0) if", "(isinstance(par['spool'],types.StringTypes)): nameroot=par['spool'] if nameroot.endswith('.fits'): nameroot = nameroot[:-5] inpath = '%s/%s//clean/'%(SPEEDYOUT,nameroot) else: nameroot=par['spool'][0] if", "to unspool\" else: for i in range(1,len(par['spool'])): if dodarks[0] == 3 or dodarks[0]", "%s %s %d %d'%(nameroot,par['coordat'],nameroot,par['last'],par['ap']) os.system(strg) if par['myapphot'].startswith('y'): print \"\\n\\n\\nrunning my aperture photometry...\\n\\n\\n\" myapphot(par,", "= nameroot[:-5] inpath = '%s/%s//clean/'%(SPEEDYOUT,nameroot) else: nameroot=par['spool'][0] if nameroot.endswith('.fits'): nameroot = nameroot[:-5] inpath", "mjd speedyout=mygetenv('SPEEDYOUT') def readconfig(configfile): f = open(configfile, 'r') config_string = f.read() parameters =", "photometry(par, cosmic) if par['createlc'].startswith('y'): print \"\\n\\n\\ncreating lcvs...\\n\\n\\n\" if(cosmic): strg = 'python extractlc.py /science/fbianco/HSPdata/%s/unspooled/clean/", "import types par = readconfig(sys.argv[1]) print par SPEEDYOUT=mygetenv('SPEEDYOUT') if (isinstance(par['spool'],types.StringTypes)): nameroot=par['spool'] if nameroot.endswith('.fits'):", "(sex)...\\n\\n\\n\" runsex(par, cosmic) if par['phot'].startswith('y'): print \"\\n\\n\\nrunning iraf photometry...\\n\\n\\n\" photometry(par, cosmic) if par['createlc'].startswith('y'):", "os.path.isfile(fname): ret = unspoolit(inpath, nameroot+'.fits', inpath,par['dark'],'avg', dodarks[0], heredir,outpath,0) if ret !=1: print \"\\n\\n\\n!!!!!!!!!!!!!!!PANIC:", "(isinstance(par['spool'],types.StringTypes)): nameroot=par['spool'] if nameroot.endswith('.fits'): nameroot = nameroot[:-5] fname = '%s/%s/%s.fits'%(par['impath'],par['imdir'], nameroot) if os.path.isfile(fname):", "strg = 'python phot.py /science/fbianco/HSPdata/%s/unspooled/clean/ %s %d %s'%(par['spool'],par['coordat'], par['last'], par['spool']) os.system(strg) else: strg", "from myutils import mymkdir,mygetenv import types par = readconfig(sys.argv[1]) print par SPEEDYOUT=mygetenv('SPEEDYOUT') if", "os.system(strg) #photometry def photometry(par, cosmic): if (cosmic): strg = 'python phot.py /science/fbianco/HSPdata/%s/unspooled/clean/ %s", "i in range(1,len(par['spool'])): if dodarks[0] == 3 or dodarks[0] == 
0: dodarks.append(dodarks[0]) else:", "0: sys.exit() if mymkdir(outpath+'/unspooled')!= 0: sys.exit() # strg = 'mkdir %s'%outpath # os.system(strg)", "par['last']=int(image[0].header['NAXIS3']) print \"last image in spool: \",par['last'] if par['unspool'].startswith('y'): print \"\\n\\n\\nUNSPOOLING\\n\\n\\n\" unspool(par, outpath)", "exiting!!!!!!!!!!!\\n\\n\\n\" sys.exit(0) ###################################################################################### ################### cosmics and sextractor def runsex(par,cosmic): if (cosmic == 1):", "runsex(par,cosmic): if (cosmic == 1): strg = 'python runcosmic.py /science/fbianco/HSPdata/unspooled/%s %s_ 0 %d'%(par['spool'],par['spool'],par['last'])", "cosmic) if par['createlc'].startswith('y'): print \"\\n\\n\\ncreating lcvs...\\n\\n\\n\" if(cosmic): strg = 'python extractlc.py /science/fbianco/HSPdata/%s/unspooled/clean/ %s", "'python runcosmic.py /science/fbianco/HSPdata/unspooled/%s %s_ 0 %d'%(par['spool'],par['spool'],par['last']) os.system(strg) strg='for i in /science/fbianco/HSPdata/%s/clean/unspooled/*fits ; do", "\"last image in spool: \",par['last'] if par['unspool'].startswith('y'): print \"\\n\\n\\nUNSPOOLING\\n\\n\\n\" unspool(par, outpath) cosmic =", "'mkdir %s'%outpath # os.system(strg) heredir=mygetenv('SPEEDYOUT')+'/darks/' if mymkdir(heredir)!= 0: sys.exit() dodarks = [par['dodark']] if", "spool %s to be found'%fname sys.exit(0) ################### aperture photometry def myapphot(par, cosmic): from", "ret =myphot(inpath, par['coords'], int(par['last']), par['ap'], par['centroid'], nameroot, par['target']) if ret !=1: print \"\\n\\n\\n!!!!!!!!!!!!!!!PANIC:", "as PF sys.path.append(\"../LIHSPcommon\") from myutils import mygetenv,readconfig, mymkdir, mjd speedyout=mygetenv('SPEEDYOUT') def readconfig(configfile): f", "strg = 'python phot.py /science/fbianco/HSPdata/%s/unspooled/ %s %d %s'%(par['spool'],par['coordat'], par['last'], par['spool']) os.system(strg) ###################################################################################### from", "(cosmic == 0): strg='for i in /science/fbianco/HSPdata/%s/unspooled/*fits ; do python runsextractor.py $i; done'%par['spool']", "photometry...\\n\\n\\n\" photometry(par, cosmic) if par['createlc'].startswith('y'): print \"\\n\\n\\ncreating lcvs...\\n\\n\\n\" if(cosmic): strg = 'python extractlc.py", "\"last image in spool: \",par['last'] if par['last'] == 0 or par['last'] >image[0].header['NAXIS3']: par['last']=int(image[0].header['NAXIS3'])", "sys,os,time import pyfits as PF sys.path.append(\"../LIHSPcommon\") from myutils import mygetenv,readconfig, mymkdir, mjd speedyout=mygetenv('SPEEDYOUT')", "img+'.fits', inpath,par['dark'],'avg', dodarks[i], heredir,outpath) if ret !=1: print \"\\n\\n\\n!!!!!!!!!!!!!!!PANIC: unspooling failed. exiting!!!!!!!!!!!\\n\\n\\n\" sys.exit(0)", "in spool: \",par['last'] if par['last'] == 0 or par['last'] >image[0].header['NAXIS3']: par['last']=int(image[0].header['NAXIS3']) print \"last", "= 'python extractlc.py /science/fbianco/HSPdata/%s/unspooled/clean/ %s %s %d %d'%(nameroot,par['coordat'],nameroot,par['last'],par['ap']) os.system(strg) else: strg = 'python", "else : nameroot=par['spool'][0] if nameroot.endswith('.fits'): nameroot = nameroot[:-5] outpath = '%s/%s_all/'%(SPEEDYOUT,nameroot) tmp =", "\"\\n\\n\\n!!!!!!!!!!!!!!!PANIC: photometry failed. 
exiting!!!!!!!!!!!\\n\\n\\n\" sys.exit(0) ###################################################################################### ################### cosmics and sextractor def runsex(par,cosmic): if", "print \"last image in spool: \",par['last'] if par['unspool'].startswith('y'): print \"\\n\\n\\nUNSPOOLING\\n\\n\\n\" unspool(par, outpath) cosmic", "= nameroot[:-5] inpath = '%s/%s_all/unspooled/'%(SPEEDYOUT,nameroot) ret =myphot(inpath, par['coords'], int(par['last']), par['ap'], par['centroid'], nameroot, par['target'])", "if mymkdir(heredir)!= 0: sys.exit() dodarks = [par['dodark']] if (isinstance(par['spool'],types.StringTypes)): print \"only 1 file", "parameters = eval(config_string) return parameters #unspool def unspool(par,outpath): from myutils import mymkdir,mygetenv from", "'python phot.py /science/fbianco/HSPdata/%s/unspooled/ %s %d %s'%(par['spool'],par['coordat'], par['last'], par['spool']) os.system(strg) ###################################################################################### from myutils import", "nameroot.endswith('.fits'): nameroot = nameroot[:-5] fname = '%s/%s/%s.fits'%(par['impath'],par['imdir'], nameroot) if os.path.isfile(fname): ret = unspoolit(inpath,", "%s %d %d'%(nameroot,par['coordat'],nameroot,par['last'],par['ap']) os.system(strg) else: strg = 'python extractlc.py /science/fbianco/HSPdata/%s/unspooled %s %s %d", "else: print 'no spool %s to be found'%fname sys.exit(0) ################### aperture photometry def", "if nameroot.endswith('.fits'): nameroot = nameroot[:-5] fname = '%s/%s/%s.fits'%(par['impath'],par['imdir'], nameroot) if os.path.isfile(fname): ret =", "print \"only 1 file to unspool\" else: for i in range(1,len(par['spool'])): if dodarks[0]", "runcosmic.py /science/fbianco/HSPdata/unspooled/%s %s_ 0 %d'%(par['spool'],par['spool'],par['last']) os.system(strg) strg='for i in /science/fbianco/HSPdata/%s/clean/unspooled/*fits ; do python", "'%s/%s_all/clean/'%(SPEEDYOUT,nameroot) else: if (isinstance(par['spool'],types.StringTypes)): nameroot=par['spool'] if nameroot.endswith('.fits'): nameroot = nameroot[:-5] inpath = '%s/%s/unspooled/'%(SPEEDYOUT,nameroot)", "be found'%fname sys.exit(0) else : for i,img in enumerate(par['spool']): if img.endswith('.fits'): img =", "nameroot.endswith('.fits'): nameroot = nameroot[:-5] inpath = '%s/%s_all/clean/'%(SPEEDYOUT,nameroot) else: if (isinstance(par['spool'],types.StringTypes)): nameroot=par['spool'] if nameroot.endswith('.fits'):", "in /science/fbianco/HSPdata/%s/unspooled/*fits ; do python runsextractor.py $i; done'%par['spool'] print strg os.system(strg) #photometry def", "return parameters #unspool def unspool(par,outpath): from myutils import mymkdir,mygetenv from unspool import unspoolit", "###################################################################################### ################### cosmics and sextractor def runsex(par,cosmic): if (cosmic == 1): strg =", "= 'python phot.py /science/fbianco/HSPdata/%s/unspooled/clean/ %s %d %s'%(par['spool'],par['coordat'], par['last'], par['spool']) os.system(strg) else: strg =", "print \"\\n\\n\\n!!!!!!!!!!!!!!!PANIC: photometry failed. 
exiting!!!!!!!!!!!\\n\\n\\n\" sys.exit(0) ###################################################################################### ################### cosmics and sextractor def runsex(par,cosmic):", "else: print 'no spool %s to be found'%fname sys.exit(0) else : for i,img", "nameroot = nameroot[:-5] fname = '%s/%s/%s.fits'%(par['impath'],par['imdir'], nameroot) if os.path.isfile(fname): ret = unspoolit(inpath, nameroot+'.fits',", "inpath print par['spool'], dodarks heredir=speedyout+'/darks/' if mymkdir(heredir)!= 0: sys.exit() if (isinstance(par['spool'],types.StringTypes)): nameroot=par['spool'] if", "/science/fbianco/HSPdata/%s/unspooled/clean/ %s %d %s'%(par['spool'],par['coordat'], par['last'], par['spool']) os.system(strg) else: strg = 'python phot.py /science/fbianco/HSPdata/%s/unspooled/", "from myapphot import * SPEEDYOUT=mygetenv('SPEEDYOUT') if (cosmic): if (isinstance(par['spool'],types.StringTypes)): nameroot=par['spool'] if nameroot.endswith('.fits'): nameroot", "else: for i in range(1,len(par['spool'])): if dodarks[0] == 3 or dodarks[0] == 0:", "dodarks[i], heredir,outpath) if ret !=1: print \"\\n\\n\\n!!!!!!!!!!!!!!!PANIC: unspooling failed. exiting!!!!!!!!!!!\\n\\n\\n\" sys.exit(0) else: print", "inpath = '%s/%s_all/unspooled/'%(SPEEDYOUT,nameroot) ret =myphot(inpath, par['coords'], int(par['last']), par['ap'], par['centroid'], nameroot, par['target']) if ret", "strg os.system(strg) #photometry def photometry(par, cosmic): if (cosmic): strg = 'python phot.py /science/fbianco/HSPdata/%s/unspooled/clean/", "nameroot=par['spool'][0] if nameroot.endswith('.fits'): nameroot = nameroot[:-5] inpath = '%s/%s_all/unspooled/'%(SPEEDYOUT,nameroot) ret =myphot(inpath, par['coords'], int(par['last']),", "par['centroid'], nameroot, par['target']) if ret !=1: print \"\\n\\n\\n!!!!!!!!!!!!!!!PANIC: photometry failed. exiting!!!!!!!!!!!\\n\\n\\n\" sys.exit(0) ######################################################################################", "= '%s/%s_all/clean/'%(SPEEDYOUT,nameroot) else: if (isinstance(par['spool'],types.StringTypes)): nameroot=par['spool'] if nameroot.endswith('.fits'): nameroot = nameroot[:-5] inpath =", ": nameroot=par['spool'][0] if nameroot.endswith('.fits'): nameroot = nameroot[:-5] outpath = '%s/%s_all/'%(SPEEDYOUT,nameroot) tmp = '%s/%s/%s.fits'%(par['impath'],par['imdir'],", "/science/fbianco/HSPdata/%s/unspooled/clean/ %s %s %d %d'%(nameroot,par['coordat'],nameroot,par['last'],par['ap']) os.system(strg) else: strg = 'python extractlc.py /science/fbianco/HSPdata/%s/unspooled %s", "print \"last image in spool: \",par['last'] if par['last'] == 0 or par['last'] >image[0].header['NAXIS3']:", "dodarks[0] == 0: dodarks.append(dodarks[0]) else: dodarks.append(2) inpath=par['impath']+'/'+par['imdir']+'/' print inpath print par['spool'], dodarks heredir=speedyout+'/darks/'", "nameroot=par['spool'] if nameroot.endswith('.fits'): nameroot = nameroot[:-5] fname = '%s/%s/%s.fits'%(par['impath'],par['imdir'], nameroot) if os.path.isfile(fname): ret", "SPEEDYOUT=mygetenv('SPEEDYOUT') if (cosmic): if (isinstance(par['spool'],types.StringTypes)): nameroot=par['spool'] if nameroot.endswith('.fits'): nameroot = nameroot[:-5] inpath =", "int(par['last']), par['ap'], par['centroid'], nameroot, par['target']) if ret !=1: print \"\\n\\n\\n!!!!!!!!!!!!!!!PANIC: photometry failed. exiting!!!!!!!!!!!\\n\\n\\n\"", "unspoolit(inpath, img+'.fits', inpath,par['dark'],'avg', dodarks[i], heredir,outpath) if ret !=1: print \"\\n\\n\\n!!!!!!!!!!!!!!!PANIC: unspooling failed. 
exiting!!!!!!!!!!!\\n\\n\\n\"", "inpath = '%s/%s//clean/'%(SPEEDYOUT,nameroot) else: nameroot=par['spool'][0] if nameroot.endswith('.fits'): nameroot = nameroot[:-5] inpath = '%s/%s_all/clean/'%(SPEEDYOUT,nameroot)", "strg='for i in /science/fbianco/HSPdata/%s/unspooled/*fits ; do python runsextractor.py $i; done'%par['spool'] print strg os.system(strg)", "phot.py /science/fbianco/HSPdata/%s/unspooled/clean/ %s %d %s'%(par['spool'],par['coordat'], par['last'], par['spool']) os.system(strg) else: strg = 'python phot.py", "def readconfig(configfile): f = open(configfile, 'r') config_string = f.read() parameters = eval(config_string) return", "dodarks[0] == 3 or dodarks[0] == 0: dodarks.append(dodarks[0]) else: dodarks.append(2) inpath=par['impath']+'/'+par['imdir']+'/' print inpath", "par['unspool'].startswith('y'): print \"\\n\\n\\nUNSPOOLING\\n\\n\\n\" unspool(par, outpath) cosmic = 0 if par['cosmic'].startswith('y'): print \"\\n\\n\\nremoving cosmics...\\n\\n\\n\"", "'%s/%s/%s.fits'%(par['impath'],par['imdir'], nameroot) image=PF.open(tmp) header=image[0].header print \"last image in spool: \",par['last'] if par['last'] ==", "def photometry(par, cosmic): if (cosmic): strg = 'python phot.py /science/fbianco/HSPdata/%s/unspooled/clean/ %s %d %s'%(par['spool'],par['coordat'],", "\"\\n\\n\\nremoving cosmics...\\n\\n\\n\" cosmic = 1 if par['sextract'].startswith('y'): print \"\\n\\n\\nextracting (sex)...\\n\\n\\n\" runsex(par, cosmic) if", "do python runsextractor.py $i; done'%par['spool'] print strg os.system(strg) #photometry def photometry(par, cosmic): if", "os.system(strg) strg='for i in /science/fbianco/HSPdata/%s/clean/unspooled/*fits ; do python runsextractor.py $i; done'%par['spool'] os.system(strg) if", "nameroot[:-5] inpath = '%s/%s/unspooled/'%(SPEEDYOUT,nameroot) else: nameroot=par['spool'][0] if nameroot.endswith('.fits'): nameroot = nameroot[:-5] inpath =", "header=image[0].header print \"last image in spool: \",par['last'] if par['last'] == 0 or par['last']", "= '%s/%s/'%(SPEEDYOUT,nameroot) else : nameroot=par['spool'][0] if nameroot.endswith('.fits'): nameroot = nameroot[:-5] outpath = '%s/%s_all/'%(SPEEDYOUT,nameroot)", "strg = 'mkdir %s'%outpath # os.system(strg) heredir=mygetenv('SPEEDYOUT')+'/darks/' if mymkdir(heredir)!= 0: sys.exit() dodarks =", "= unspoolit(inpath, nameroot+'.fits', inpath,par['dark'],'avg', dodarks[0], heredir,outpath,0) if ret !=1: print \"\\n\\n\\n!!!!!!!!!!!!!!!PANIC: unspooling failed.", "runsextractor.py $i; done'%par['spool'] os.system(strg) if (cosmic == 0): strg='for i in /science/fbianco/HSPdata/%s/unspooled/*fits ;", "nameroot = nameroot[:-5] inpath = '%s/%s/unspooled/'%(SPEEDYOUT,nameroot) else: nameroot=par['spool'][0] if nameroot.endswith('.fits'): nameroot = nameroot[:-5]", "= 1 if par['sextract'].startswith('y'): print \"\\n\\n\\nextracting (sex)...\\n\\n\\n\" runsex(par, cosmic) if par['phot'].startswith('y'): print \"\\n\\n\\nrunning", "failed. 
exiting!!!!!!!!!!!\\n\\n\\n\" sys.exit(0) ###################################################################################### ################### cosmics and sextractor def runsex(par,cosmic): if (cosmic ==", "sys.exit() # strg = 'mkdir %s'%outpath # os.system(strg) heredir=mygetenv('SPEEDYOUT')+'/darks/' if mymkdir(heredir)!= 0: sys.exit()", "== 1): strg = 'python runcosmic.py /science/fbianco/HSPdata/unspooled/%s %s_ 0 %d'%(par['spool'],par['spool'],par['last']) os.system(strg) strg='for i", "if (cosmic == 0): strg='for i in /science/fbianco/HSPdata/%s/unspooled/*fits ; do python runsextractor.py $i;", "unspool\" else: for i in range(1,len(par['spool'])): if dodarks[0] == 3 or dodarks[0] ==", "in enumerate(par['spool']): if img.endswith('.fits'): img = img[:-5] fname = '%s/%s/%s.fits'%(par['impath'],par['imdir'], img) if os.path.isfile(fname):", "tmp = '%s/%s/%s.fits'%(par['impath'],par['imdir'], nameroot) image=PF.open(tmp) header=image[0].header print \"last image in spool: \",par['last'] if", "/science/fbianco/HSPdata/%s/unspooled %s %s %d %d'%(nameroot,par['coordat'],nameroot,par['last'],par['ap']) os.system(strg) if par['myapphot'].startswith('y'): print \"\\n\\n\\nrunning my aperture photometry...\\n\\n\\n\"", "photometry(par, cosmic): if (cosmic): strg = 'python phot.py /science/fbianco/HSPdata/%s/unspooled/clean/ %s %d %s'%(par['spool'],par['coordat'], par['last'],", "types par = readconfig(sys.argv[1]) print par SPEEDYOUT=mygetenv('SPEEDYOUT') if (isinstance(par['spool'],types.StringTypes)): nameroot=par['spool'] if nameroot.endswith('.fits'): nameroot", "= '%s/%s/%s.fits'%(par['impath'],par['imdir'], img) if os.path.isfile(fname): ret = unspoolit(inpath, img+'.fits', inpath,par['dark'],'avg', dodarks[i], heredir,outpath) if", "par['last'] == 0 or par['last'] >image[0].header['NAXIS3']: par['last']=int(image[0].header['NAXIS3']) print \"last image in spool: \",par['last']", "if (isinstance(par['spool'],types.StringTypes)): nameroot=par['spool'] if nameroot.endswith('.fits'): nameroot = nameroot[:-5] inpath = '%s/%s//clean/'%(SPEEDYOUT,nameroot) else: nameroot=par['spool'][0]", "'%s/%s/%s.fits'%(par['impath'],par['imdir'], img) if os.path.isfile(fname): ret = unspoolit(inpath, img+'.fits', inpath,par['dark'],'avg', dodarks[i], heredir,outpath) if ret", "%d %d'%(nameroot,par['coordat'],nameroot,par['last'],par['ap']) os.system(strg) else: strg = 'python extractlc.py /science/fbianco/HSPdata/%s/unspooled %s %s %d %d'%(nameroot,par['coordat'],nameroot,par['last'],par['ap'])", "= [par['dodark']] if (isinstance(par['spool'],types.StringTypes)): print \"only 1 file to unspool\" else: for i", "if par['createlc'].startswith('y'): print \"\\n\\n\\ncreating lcvs...\\n\\n\\n\" if(cosmic): strg = 'python extractlc.py /science/fbianco/HSPdata/%s/unspooled/clean/ %s %s", "par['last'], par['spool']) os.system(strg) ###################################################################################### from myutils import mymkdir,mygetenv import types par = readconfig(sys.argv[1])", "mymkdir, mjd speedyout=mygetenv('SPEEDYOUT') def readconfig(configfile): f = open(configfile, 'r') config_string = f.read() parameters", "image=PF.open(tmp) header=image[0].header print \"last image in spool: \",par['last'] if par['last'] == 0 or", "\"\\n\\n\\n!!!!!!!!!!!!!!!PANIC: unspooling failed. 
exiting!!!!!!!!!!!\\n\\n\\n\" sys.exit(0) else: print 'no spool %s to be found'%fname", "sys.exit(0) else: print 'no spool %s to be found'%fname sys.exit(0) else : for", "spool: \",par['last'] if par['last'] == 0 or par['last'] >image[0].header['NAXIS3']: par['last']=int(image[0].header['NAXIS3']) print \"last image", "'%s/%s/unspooled/'%(SPEEDYOUT,nameroot) else: nameroot=par['spool'][0] if nameroot.endswith('.fits'): nameroot = nameroot[:-5] inpath = '%s/%s_all/unspooled/'%(SPEEDYOUT,nameroot) ret =myphot(inpath,", "mymkdir(outpath+'/unspooled')!= 0: sys.exit() # strg = 'mkdir %s'%outpath # os.system(strg) heredir=mygetenv('SPEEDYOUT')+'/darks/' if mymkdir(heredir)!=", "sys.exit(0) else: print 'no spool %s to be found'%fname sys.exit(0) ################### aperture photometry", "import mymkdir,mygetenv import types par = readconfig(sys.argv[1]) print par SPEEDYOUT=mygetenv('SPEEDYOUT') if (isinstance(par['spool'],types.StringTypes)): nameroot=par['spool']", "= nameroot[:-5] inpath = '%s/%s/unspooled/'%(SPEEDYOUT,nameroot) else: nameroot=par['spool'][0] if nameroot.endswith('.fits'): nameroot = nameroot[:-5] inpath", "else: nameroot=par['spool'][0] if nameroot.endswith('.fits'): nameroot = nameroot[:-5] inpath = '%s/%s_all/clean/'%(SPEEDYOUT,nameroot) else: if (isinstance(par['spool'],types.StringTypes)):", "from unspool import unspoolit if mymkdir(outpath)!= 0: sys.exit() if mymkdir(outpath+'/unspooled')!= 0: sys.exit() #", "print \"\\n\\n\\nrunning iraf photometry...\\n\\n\\n\" photometry(par, cosmic) if par['createlc'].startswith('y'): print \"\\n\\n\\ncreating lcvs...\\n\\n\\n\" if(cosmic): strg", "nameroot=par['spool'] if nameroot.endswith('.fits'): nameroot = nameroot[:-5] inpath = '%s/%s//clean/'%(SPEEDYOUT,nameroot) else: nameroot=par['spool'][0] if nameroot.endswith('.fits'):", "print \"\\n\\n\\nextracting (sex)...\\n\\n\\n\" runsex(par, cosmic) if par['phot'].startswith('y'): print \"\\n\\n\\nrunning iraf photometry...\\n\\n\\n\" photometry(par, cosmic)", "mymkdir(heredir)!= 0: sys.exit() dodarks = [par['dodark']] if (isinstance(par['spool'],types.StringTypes)): print \"only 1 file to", "heredir,outpath,0) if ret !=1: print \"\\n\\n\\n!!!!!!!!!!!!!!!PANIC: unspooling failed. 
exiting!!!!!!!!!!!\\n\\n\\n\" sys.exit(0) else: print 'no", "= nameroot[:-5] outpath = '%s/%s/'%(SPEEDYOUT,nameroot) else : nameroot=par['spool'][0] if nameroot.endswith('.fits'): nameroot = nameroot[:-5]", "print par SPEEDYOUT=mygetenv('SPEEDYOUT') if (isinstance(par['spool'],types.StringTypes)): nameroot=par['spool'] if nameroot.endswith('.fits'): nameroot = nameroot[:-5] outpath =", "nameroot) image=PF.open(tmp) header=image[0].header print \"last image in spool: \",par['last'] if par['last'] == 0", "(cosmic == 1): strg = 'python runcosmic.py /science/fbianco/HSPdata/unspooled/%s %s_ 0 %d'%(par['spool'],par['spool'],par['last']) os.system(strg) strg='for", "print \"\\n\\n\\nremoving cosmics...\\n\\n\\n\" cosmic = 1 if par['sextract'].startswith('y'): print \"\\n\\n\\nextracting (sex)...\\n\\n\\n\" runsex(par, cosmic)", "readconfig(configfile): f = open(configfile, 'r') config_string = f.read() parameters = eval(config_string) return parameters", "if (cosmic): strg = 'python phot.py /science/fbianco/HSPdata/%s/unspooled/clean/ %s %d %s'%(par['spool'],par['coordat'], par['last'], par['spool']) os.system(strg)", "%d %s'%(par['spool'],par['coordat'], par['last'], par['spool']) os.system(strg) ###################################################################################### from myutils import mymkdir,mygetenv import types par", "!=1: print \"\\n\\n\\n!!!!!!!!!!!!!!!PANIC: photometry failed. exiting!!!!!!!!!!!\\n\\n\\n\" sys.exit(0) ###################################################################################### ################### cosmics and sextractor def", "#photometry def photometry(par, cosmic): if (cosmic): strg = 'python phot.py /science/fbianco/HSPdata/%s/unspooled/clean/ %s %d", "if par['sextract'].startswith('y'): print \"\\n\\n\\nextracting (sex)...\\n\\n\\n\" runsex(par, cosmic) if par['phot'].startswith('y'): print \"\\n\\n\\nrunning iraf photometry...\\n\\n\\n\"", "'no spool %s to be found'%fname sys.exit(0) ################### aperture photometry def myapphot(par, cosmic):", "ret = unspoolit(inpath, nameroot+'.fits', inpath,par['dark'],'avg', dodarks[0], heredir,outpath,0) if ret !=1: print \"\\n\\n\\n!!!!!!!!!!!!!!!PANIC: unspooling", "eval(config_string) return parameters #unspool def unspool(par,outpath): from myutils import mymkdir,mygetenv from unspool import", "unspooling failed. 
exiting!!!!!!!!!!!\\n\\n\\n\" sys.exit(0) else: print 'no spool %s to be found'%fname sys.exit(0)", "'python phot.py /science/fbianco/HSPdata/%s/unspooled/clean/ %s %d %s'%(par['spool'],par['coordat'], par['last'], par['spool']) os.system(strg) else: strg = 'python", "print \"\\n\\n\\ncreating lcvs...\\n\\n\\n\" if(cosmic): strg = 'python extractlc.py /science/fbianco/HSPdata/%s/unspooled/clean/ %s %s %d %d'%(nameroot,par['coordat'],nameroot,par['last'],par['ap'])", "cosmic = 1 if par['sextract'].startswith('y'): print \"\\n\\n\\nextracting (sex)...\\n\\n\\n\" runsex(par, cosmic) if par['phot'].startswith('y'): print", "to be found'%fname sys.exit(0) ################### aperture photometry def myapphot(par, cosmic): from myutils import", "strg = 'python extractlc.py /science/fbianco/HSPdata/%s/unspooled/clean/ %s %s %d %d'%(nameroot,par['coordat'],nameroot,par['last'],par['ap']) os.system(strg) else: strg =", "# os.system(strg) heredir=mygetenv('SPEEDYOUT')+'/darks/' if mymkdir(heredir)!= 0: sys.exit() dodarks = [par['dodark']] if (isinstance(par['spool'],types.StringTypes)): print", "= nameroot[:-5] inpath = '%s/%s_all/clean/'%(SPEEDYOUT,nameroot) else: if (isinstance(par['spool'],types.StringTypes)): nameroot=par['spool'] if nameroot.endswith('.fits'): nameroot =", "os.system(strg) ###################################################################################### from myutils import mymkdir,mygetenv import types par = readconfig(sys.argv[1]) print par", ">image[0].header['NAXIS3']: par['last']=int(image[0].header['NAXIS3']) print \"last image in spool: \",par['last'] if par['unspool'].startswith('y'): print \"\\n\\n\\nUNSPOOLING\\n\\n\\n\" unspool(par,", "inpath = '%s/%s/unspooled/'%(SPEEDYOUT,nameroot) else: nameroot=par['spool'][0] if nameroot.endswith('.fits'): nameroot = nameroot[:-5] inpath = '%s/%s_all/unspooled/'%(SPEEDYOUT,nameroot)", "par['sextract'].startswith('y'): print \"\\n\\n\\nextracting (sex)...\\n\\n\\n\" runsex(par, cosmic) if par['phot'].startswith('y'): print \"\\n\\n\\nrunning iraf photometry...\\n\\n\\n\" photometry(par,", "%s %d %d'%(nameroot,par['coordat'],nameroot,par['last'],par['ap']) os.system(strg) if par['myapphot'].startswith('y'): print \"\\n\\n\\nrunning my aperture photometry...\\n\\n\\n\" myapphot(par, cosmic)", "nameroot = nameroot[:-5] inpath = '%s/%s//clean/'%(SPEEDYOUT,nameroot) else: nameroot=par['spool'][0] if nameroot.endswith('.fits'): nameroot = nameroot[:-5]", "[par['dodark']] if (isinstance(par['spool'],types.StringTypes)): print \"only 1 file to unspool\" else: for i in", "PF sys.path.append(\"../LIHSPcommon\") from myutils import mygetenv,readconfig, mymkdir, mjd speedyout=mygetenv('SPEEDYOUT') def readconfig(configfile): f =", "lcvs...\\n\\n\\n\" if(cosmic): strg = 'python extractlc.py /science/fbianco/HSPdata/%s/unspooled/clean/ %s %s %d %d'%(nameroot,par['coordat'],nameroot,par['last'],par['ap']) os.system(strg) else:", "par['target']) if ret !=1: print \"\\n\\n\\n!!!!!!!!!!!!!!!PANIC: photometry failed. 
from myutils import mygetenv,readconfig, mymkdir, mjd

speedyout=mygetenv('SPEEDYOUT')

def readconfig(configfile):
    f = open(configfile, 'r')
    config_string = f.read()
    parameters = eval(config_string)
    return parameters

#unspool
def unspool(par,outpath):
    from myutils import mymkdir,mygetenv
    from unspool import unspoolit

    if mymkdir(outpath)!= 0:
        sys.exit()
    if mymkdir(outpath+'/unspooled')!= 0:
        sys.exit()
    # strg = 'mkdir %s'%outpath
    # os.system(strg)
    heredir=mygetenv('SPEEDYOUT')+'/darks/'
    if mymkdir(heredir)!= 0:
        sys.exit()

    dodarks = [par['dodark']]
    if (isinstance(par['spool'],types.StringTypes)):
        print "only 1 file to unspool"
    else:
        for i in range(1,len(par['spool'])):
            if dodarks[0] == 3 or ... == 0:
                dodarks.append(dodarks[0])
            else:
                dodarks.append(2)

    inpath=par['impath']+'/'+par['imdir']+'/'
    print inpath
    print par['spool'], dodarks
    heredir=speedyout+'/darks/'
    if mymkdir(heredir)!= 0:
        sys.exit()

    if (isinstance(par['spool'],types.StringTypes)):
        nameroot=par['spool']
        if nameroot.endswith('.fits'):
            nameroot = nameroot[:-5]
        fname = '%s/%s/%s.fits'%(par['impath'],par['imdir'], nameroot)
        if os.path.isfile(fname):
            ret = unspoolit(inpath, nameroot+'.fits', inpath,par['dark'],'avg', dodarks[0], heredir,outpath,0)
            if ret !=1:
                print "\n\n\n!!!!!!!!!!!!!!!PANIC: unspooling failed. exiting!!!!!!!!!!!\n\n\n"
                sys.exit(0)
        else:
            print 'no spool %s to ... found'%fname
            sys.exit(0)
    else :
        for i,img in enumerate(par['spool']):
            if img.endswith('.fits'):
                img = img[:-5]
            fname = '%s/%s/%s.fits'%(par['impath'],par['imdir'], img)
            if os.path.isfile(fname):
                ret = unspoolit(inpath, img+'.fits', inpath,par['dark'],'avg', dodarks[i], heredir,outpath)
                if ret !=1:
                    print "\n\n\n!!!!!!!!!!!!!!!PANIC: unspooling failed. exiting!!!!!!!!!!!\n\n\n"
                    sys.exit(0)
            else:
                print 'no spool %s to ... found'%fname
                sys.exit(0)

################### aperture photometry
def myapphot(par, cosmic):
    from myutils import mymkdir,mygetenv
    from myapphot import *

    SPEEDYOUT=mygetenv('SPEEDYOUT')
    if (cosmic):
        if (isinstance(par['spool'],types.StringTypes)):
            nameroot=par['spool']
            if nameroot.endswith('.fits'):
                nameroot = nameroot[:-5]
            inpath = '%s/%s//clean/'%(SPEEDYOUT,nameroot)
        else:
            nameroot=par['spool'][0]
            if nameroot.endswith('.fits'):
                nameroot = nameroot[:-5]
            # ...
    else:
        if (isinstance(par['spool'],types.StringTypes)):
            nameroot=par['spool']
            if nameroot.endswith('.fits'):
                nameroot = nameroot[:-5]
            inpath = '%s/%s/unspooled/'%(SPEEDYOUT,nameroot)
        else:
            nameroot=par['spool'][0]
            if nameroot.endswith('.fits'):
                nameroot = nameroot[:-5]
            inpath = '%s/%s_all/unspooled/'%(SPEEDYOUT,nameroot)
    ret =myphot(inpath, par['coords'], int(par['last']), par['ap'], par['centroid'], nameroot, par['target'])
    if ret !=1:
        print "\n\n\n!!!!!!!!!!!!!!!PANIC: photometry failed. exiting!!!!!!!!!!!\n\n\n"
        sys.exit(0)

######################################################################################
################### cosmics and sextractor
def runsex(par,cosmic):
    if (cosmic == 1):
        strg = 'python runcosmic.py /science/fbianco/HSPdata/unspooled/%s %s_ 0 %d'%(par['spool'],par['spool'],par['last'])
        os.system(strg)
        strg='for i in /science/fbianco/HSPdata/%s/clean/unspooled/*fits ; do python runsextractor.py $i; done'%par['spool']
        os.system(strg)
    if (cosmic == 0):
        strg='for i in /science/fbianco/HSPdata/%s/unspooled/*fits ; do python runsextractor.py $i; done'%par['spool']
        print strg
        os.system(strg)

#photometry
def photometry(par, cosmic):
    if (cosmic):
        strg = 'python phot.py /science/fbianco/HSPdata/%s/unspooled/clean/ %s %d %s'%(par['spool'],par['coordat'], par['last'], par['spool'])
        os.system(strg)
    else:
        strg = 'python phot.py /science/fbianco/HSPdata/%s/unspooled/ %s %d %s'%(par['spool'],par['coordat'], par['last'], par['spool'])
        os.system(strg)

######################################################################################
from myutils import mymkdir,mygetenv
import types

par = readconfig(sys.argv[1])
print par
SPEEDYOUT=mygetenv('SPEEDYOUT')
if (isinstance(par['spool'],types.StringTypes)):
    nameroot=par['spool']
    if nameroot.endswith('.fits'):
        nameroot = nameroot[:-5]
    outpath = '%s/%s/'%(SPEEDYOUT,nameroot)
else :
    nameroot=par['spool'][0]
    if nameroot.endswith('.fits'):
        nameroot = ...
    # ...

# ...
if par['last'] == 0 or par['last'] >image[0].header['NAXIS3']:
    par['last']=int(image[0].header['NAXIS3'])
print "last image in spool: ",par['last']

if par['unspool'].startswith('y'):
    print "\n\n\nUNSPOOLING\n\n\n"
    unspool(par, outpath)

cosmic = 0
if par['cosmic'].startswith('y'):
    print "\n\n\nremoving cosmics...\n\n\n"
    cosmic = 1
if par['sextract'].startswith('y'):
    print "\n\n\nextracting (sex)...\n\n\n"
    runsex(par, cosmic)
if par['phot'].startswith('y'):
    print "\n\n\nrunning iraf photometry...\n\n\n"
    photometry(par, cosmic)
if par['createlc'].startswith('y'):
    print "\n\n\ncreating lcvs...\n\n\n"
    if(cosmic):
        strg = 'python extractlc.py /science/fbianco/HSPdata/%s/unspooled/clean/ %s %s %d %d'%(nameroot,par['coordat'],nameroot,par['last'],par['ap'])
        os.system(strg)
    else:
        strg = 'python extractlc.py /science/fbianco/HSPdata/%s/unspooled %s %s %d %d'%(nameroot,par['coordat'],nameroot,par['last'],par['ap'])
        os.system(strg)
if par['myapphot'].startswith('y'):
    print "\n\n\nrunning ..."
    # ...
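Since readconfig() simply eval()s the text of the parameter file, the configuration handed to this driver is a plain Python dict literal. A minimal sketch of such a file, with hypothetical values (the key names are the ones the script reads above; every path and value below is made up):

{
 'spool'    : 'spool_001.fits',        # or a list of spool files
 'impath'   : '/science/fbianco/HSPdata',
 'imdir'    : 'raw',
 'dark'     : 'dark_avg.fits',
 'dodark'   : 1,
 'last'     : 0,                       # 0 means "take NAXIS3 from the header"
 'ap'       : 5,
 'centroid' : 'y',
 'coords'   : 'target.coo',
 'coordat'  : 'target.coo',
 'target'   : 'mytarget',
 'unspool'  : 'y',
 'cosmic'   : 'n',
 'sextract' : 'n',
 'phot'     : 'n',
 'createlc' : 'y',
 'myapphot' : 'y',
}

The driver is then run as python speedyphot.py myrun.par (script name assumed), and the 'y'/'n' switches decide which of the steps above are executed.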
[ "anchor_4dtensor=anchor_4dtensor, classes=classes, max_num_of_rois=max_num_of_rois, nsm_iou_threshold=nsm_iou_threshold, nsm_score_threshold=nsm_score_threshold, unified_roi_size=unified_roi_size, rpn_head_dim=rpn_head_dim, fc_denses=fc_denses, block_settings=block_settings, base_block_trainable=False) rpn_model.load_weights('{}/rpn_weights.h5'.format(output_path), by_name=True) #", "# detection_model.save('{}/detection_model'.format(output_path)) converter = tf.lite.TFLiteConverter.from_saved_model('{}/rpn_model'.format(output_path)) converter.experimental_new_converter = True converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS] tflite_model", "= params['classes'] output_path = params['output_path'] abox4d = genanchors(isize=ishape[:2], ssize=ssize, asizes=asizes) anchor_4dtensor = tf.constant(value=abox4d,", "converter.experimental_new_converter = True converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS] tflite_model = converter.convert() open('{}/rpn_model.tflite'.format(output_path), 'wb').write(tflite_model) #", "= params['rpn_head_dim'] fc_denses = params['fc_denses'] block_settings = params['resnet'] nsm_iou_threshold = params['nsm_iou_threshold'] nsm_score_threshold =", "datetime import datetime import matplotlib.pyplot as plt import tensorflow as tf from pycocotools.coco", "as np from datetime import datetime import matplotlib.pyplot as plt import tensorflow as", "block_settings=block_settings, base_block_trainable=False) rpn_model.load_weights('{}/rpn_weights.h5'.format(output_path), by_name=True) # detection_model.load_weights('{}/detection_weights.h5'.format(output_path), by_name=True) rpn_model.save('{}/rpn_model'.format(output_path)) # detection_model.save('{}/detection_model'.format(output_path)) converter = tf.lite.TFLiteConverter.from_saved_model('{}/rpn_model'.format(output_path))", "from utils import genanchors, box2frame from settings import settings params = settings('non-fpn-inference') asizes", "output_path = params['output_path'] abox4d = genanchors(isize=ishape[:2], ssize=ssize, asizes=asizes) anchor_4dtensor = tf.constant(value=abox4d, dtype='float32') rpn_model,", "detection_model.save('{}/detection_model'.format(output_path)) converter = tf.lite.TFLiteConverter.from_saved_model('{}/rpn_model'.format(output_path)) converter.experimental_new_converter = True converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS] tflite_model =", "ishape = params['ishape'] ssize = params['ssize'] max_num_of_rois = params['max_num_of_rois'] unified_roi_size = params['unified_roi_size'] rpn_head_dim", "params['unified_roi_size'] rpn_head_dim = params['rpn_head_dim'] fc_denses = params['fc_denses'] block_settings = params['resnet'] nsm_iou_threshold = params['nsm_iou_threshold']", "classes = params['classes'] output_path = params['output_path'] abox4d = genanchors(isize=ishape[:2], ssize=ssize, asizes=asizes) anchor_4dtensor =", "params['ssize'] max_num_of_rois = params['max_num_of_rois'] unified_roi_size = params['unified_roi_size'] rpn_head_dim = params['rpn_head_dim'] fc_denses = params['fc_denses']", "build_inference_maskrcnn_non_fpn( ishape=ishape, anchor_4dtensor=anchor_4dtensor, classes=classes, max_num_of_rois=max_num_of_rois, nsm_iou_threshold=nsm_iou_threshold, nsm_score_threshold=nsm_score_threshold, unified_roi_size=unified_roi_size, rpn_head_dim=rpn_head_dim, fc_denses=fc_denses, block_settings=block_settings, 
base_block_trainable=False) rpn_model.load_weights('{}/rpn_weights.h5'.format(output_path),", "params = settings('non-fpn-inference') asizes = params['asizes'] ishape = params['ishape'] ssize = params['ssize'] max_num_of_rois", "detection_model.load_weights('{}/detection_weights.h5'.format(output_path), by_name=True) rpn_model.save('{}/rpn_model'.format(output_path)) # detection_model.save('{}/detection_model'.format(output_path)) converter = tf.lite.TFLiteConverter.from_saved_model('{}/rpn_model'.format(output_path)) converter.experimental_new_converter = True converter.target_spec.supported_ops =", "as tf from pycocotools.coco import COCO from matplotlib.patches import Rectangle from models import", "rpn_head_dim = params['rpn_head_dim'] fc_denses = params['fc_denses'] block_settings = params['resnet'] nsm_iou_threshold = params['nsm_iou_threshold'] nsm_score_threshold", "rpn_model.load_weights('{}/rpn_weights.h5'.format(output_path), by_name=True) # detection_model.load_weights('{}/detection_weights.h5'.format(output_path), by_name=True) rpn_model.save('{}/rpn_model'.format(output_path)) # detection_model.save('{}/detection_model'.format(output_path)) converter = tf.lite.TFLiteConverter.from_saved_model('{}/rpn_model'.format(output_path)) converter.experimental_new_converter =", "rpn_head_dim=rpn_head_dim, fc_denses=fc_denses, block_settings=block_settings, base_block_trainable=False) rpn_model.load_weights('{}/rpn_weights.h5'.format(output_path), by_name=True) # detection_model.load_weights('{}/detection_weights.h5'.format(output_path), by_name=True) rpn_model.save('{}/rpn_model'.format(output_path)) # detection_model.save('{}/detection_model'.format(output_path)) converter", "= tf.lite.TFLiteConverter.from_saved_model('{}/rpn_model'.format(output_path)) converter.experimental_new_converter = True converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS] tflite_model = converter.convert() open('{}/rpn_model.tflite'.format(output_path),", "settings('non-fpn-inference') asizes = params['asizes'] ishape = params['ishape'] ssize = params['ssize'] max_num_of_rois = params['max_num_of_rois']", "= params['nsm_score_threshold'] classes = params['classes'] output_path = params['output_path'] abox4d = genanchors(isize=ishape[:2], ssize=ssize, asizes=asizes)", "= tf.lite.TFLiteConverter.from_saved_model('{}/detection_model'.format(output_path)) # converter.experimental_new_converter = True # converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS] # tflite_model", "params['fc_denses'] block_settings = params['resnet'] nsm_iou_threshold = params['nsm_iou_threshold'] nsm_score_threshold = params['nsm_score_threshold'] classes = params['classes']", "= converter.convert() open('{}/rpn_model.tflite'.format(output_path), 'wb').write(tflite_model) # converter = tf.lite.TFLiteConverter.from_saved_model('{}/detection_model'.format(output_path)) # converter.experimental_new_converter = True #", "abox4d = genanchors(isize=ishape[:2], ssize=ssize, asizes=asizes) anchor_4dtensor = tf.constant(value=abox4d, dtype='float32') rpn_model, detection_model = build_inference_maskrcnn_non_fpn(", "ssize=ssize, asizes=asizes) anchor_4dtensor = tf.constant(value=abox4d, dtype='float32') rpn_model, detection_model = build_inference_maskrcnn_non_fpn( ishape=ishape, anchor_4dtensor=anchor_4dtensor, classes=classes,", "tensorflow as tf from pycocotools.coco import COCO from matplotlib.patches import Rectangle from models", 
"tf.lite.OpsSet.SELECT_TF_OPS] tflite_model = converter.convert() open('{}/rpn_model.tflite'.format(output_path), 'wb').write(tflite_model) # converter = tf.lite.TFLiteConverter.from_saved_model('{}/detection_model'.format(output_path)) # converter.experimental_new_converter =", "= params['output_path'] abox4d = genanchors(isize=ishape[:2], ssize=ssize, asizes=asizes) anchor_4dtensor = tf.constant(value=abox4d, dtype='float32') rpn_model, detection_model", "= build_inference_maskrcnn_non_fpn( ishape=ishape, anchor_4dtensor=anchor_4dtensor, classes=classes, max_num_of_rois=max_num_of_rois, nsm_iou_threshold=nsm_iou_threshold, nsm_score_threshold=nsm_score_threshold, unified_roi_size=unified_roi_size, rpn_head_dim=rpn_head_dim, fc_denses=fc_denses, block_settings=block_settings, base_block_trainable=False)", "params['nsm_iou_threshold'] nsm_score_threshold = params['nsm_score_threshold'] classes = params['classes'] output_path = params['output_path'] abox4d = genanchors(isize=ishape[:2],", "COCO from matplotlib.patches import Rectangle from models import build_inference_maskrcnn_non_fpn from utils import genanchors,", "fc_denses=fc_denses, block_settings=block_settings, base_block_trainable=False) rpn_model.load_weights('{}/rpn_weights.h5'.format(output_path), by_name=True) # detection_model.load_weights('{}/detection_weights.h5'.format(output_path), by_name=True) rpn_model.save('{}/rpn_model'.format(output_path)) # detection_model.save('{}/detection_model'.format(output_path)) converter =", "nsm_iou_threshold=nsm_iou_threshold, nsm_score_threshold=nsm_score_threshold, unified_roi_size=unified_roi_size, rpn_head_dim=rpn_head_dim, fc_denses=fc_denses, block_settings=block_settings, base_block_trainable=False) rpn_model.load_weights('{}/rpn_weights.h5'.format(output_path), by_name=True) # detection_model.load_weights('{}/detection_weights.h5'.format(output_path), by_name=True) rpn_model.save('{}/rpn_model'.format(output_path))", "params['asizes'] ishape = params['ishape'] ssize = params['ssize'] max_num_of_rois = params['max_num_of_rois'] unified_roi_size = params['unified_roi_size']", "import build_inference_maskrcnn_non_fpn from utils import genanchors, box2frame from settings import settings params =", "import settings params = settings('non-fpn-inference') asizes = params['asizes'] ishape = params['ishape'] ssize =", "import datetime import matplotlib.pyplot as plt import tensorflow as tf from pycocotools.coco import", "= genanchors(isize=ishape[:2], ssize=ssize, asizes=asizes) anchor_4dtensor = tf.constant(value=abox4d, dtype='float32') rpn_model, detection_model = build_inference_maskrcnn_non_fpn( ishape=ishape,", "tf.lite.TFLiteConverter.from_saved_model('{}/detection_model'.format(output_path)) # converter.experimental_new_converter = True # converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS] # tflite_model =", "converter.experimental_new_converter = True # converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS] # tflite_model = converter.convert() #", "unified_roi_size=unified_roi_size, rpn_head_dim=rpn_head_dim, fc_denses=fc_denses, block_settings=block_settings, base_block_trainable=False) rpn_model.load_weights('{}/rpn_weights.h5'.format(output_path), by_name=True) # detection_model.load_weights('{}/detection_weights.h5'.format(output_path), by_name=True) rpn_model.save('{}/rpn_model'.format(output_path)) # detection_model.save('{}/detection_model'.format(output_path))", "import numpy as 
np from datetime import datetime import matplotlib.pyplot as plt import", "Rectangle from models import build_inference_maskrcnn_non_fpn from utils import genanchors, box2frame from settings import", "models import build_inference_maskrcnn_non_fpn from utils import genanchors, box2frame from settings import settings params", "rpn_model.save('{}/rpn_model'.format(output_path)) # detection_model.save('{}/detection_model'.format(output_path)) converter = tf.lite.TFLiteConverter.from_saved_model('{}/rpn_model'.format(output_path)) converter.experimental_new_converter = True converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS]", "asizes = params['asizes'] ishape = params['ishape'] ssize = params['ssize'] max_num_of_rois = params['max_num_of_rois'] unified_roi_size", "converter = tf.lite.TFLiteConverter.from_saved_model('{}/rpn_model'.format(output_path)) converter.experimental_new_converter = True converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS] tflite_model = converter.convert()", "import Rectangle from models import build_inference_maskrcnn_non_fpn from utils import genanchors, box2frame from settings", "genanchors(isize=ishape[:2], ssize=ssize, asizes=asizes) anchor_4dtensor = tf.constant(value=abox4d, dtype='float32') rpn_model, detection_model = build_inference_maskrcnn_non_fpn( ishape=ishape, anchor_4dtensor=anchor_4dtensor,", "= params['nsm_iou_threshold'] nsm_score_threshold = params['nsm_score_threshold'] classes = params['classes'] output_path = params['output_path'] abox4d =", "= params['unified_roi_size'] rpn_head_dim = params['rpn_head_dim'] fc_denses = params['fc_denses'] block_settings = params['resnet'] nsm_iou_threshold =", "from matplotlib.patches import Rectangle from models import build_inference_maskrcnn_non_fpn from utils import genanchors, box2frame", "= True # converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS] # tflite_model = converter.convert() # open('{}/detection_model.tflite'.format(output_path),", "params['max_num_of_rois'] unified_roi_size = params['unified_roi_size'] rpn_head_dim = params['rpn_head_dim'] fc_denses = params['fc_denses'] block_settings = params['resnet']", "<filename>maskrcnn/tflite_convert.py import numpy as np from datetime import datetime import matplotlib.pyplot as plt", "params['output_path'] abox4d = genanchors(isize=ishape[:2], ssize=ssize, asizes=asizes) anchor_4dtensor = tf.constant(value=abox4d, dtype='float32') rpn_model, detection_model =", "datetime import matplotlib.pyplot as plt import tensorflow as tf from pycocotools.coco import COCO", "anchor_4dtensor = tf.constant(value=abox4d, dtype='float32') rpn_model, detection_model = build_inference_maskrcnn_non_fpn( ishape=ishape, anchor_4dtensor=anchor_4dtensor, classes=classes, max_num_of_rois=max_num_of_rois, nsm_iou_threshold=nsm_iou_threshold,", "open('{}/rpn_model.tflite'.format(output_path), 'wb').write(tflite_model) # converter = tf.lite.TFLiteConverter.from_saved_model('{}/detection_model'.format(output_path)) # converter.experimental_new_converter = True # converter.target_spec.supported_ops =", "nsm_score_threshold = params['nsm_score_threshold'] classes = params['classes'] output_path = params['output_path'] abox4d = genanchors(isize=ishape[:2], ssize=ssize,", "True # converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS] # tflite_model = converter.convert() # 
open('{}/detection_model.tflite'.format(output_path), 'wb').write(tflite_model)", "by_name=True) # detection_model.load_weights('{}/detection_weights.h5'.format(output_path), by_name=True) rpn_model.save('{}/rpn_model'.format(output_path)) # detection_model.save('{}/detection_model'.format(output_path)) converter = tf.lite.TFLiteConverter.from_saved_model('{}/rpn_model'.format(output_path)) converter.experimental_new_converter = True", "True converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS] tflite_model = converter.convert() open('{}/rpn_model.tflite'.format(output_path), 'wb').write(tflite_model) # converter =", "= [tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS] tflite_model = converter.convert() open('{}/rpn_model.tflite'.format(output_path), 'wb').write(tflite_model) # converter = tf.lite.TFLiteConverter.from_saved_model('{}/detection_model'.format(output_path)) #", "utils import genanchors, box2frame from settings import settings params = settings('non-fpn-inference') asizes =", "params['resnet'] nsm_iou_threshold = params['nsm_iou_threshold'] nsm_score_threshold = params['nsm_score_threshold'] classes = params['classes'] output_path = params['output_path']", "= True converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS] tflite_model = converter.convert() open('{}/rpn_model.tflite'.format(output_path), 'wb').write(tflite_model) # converter", "box2frame from settings import settings params = settings('non-fpn-inference') asizes = params['asizes'] ishape =", "base_block_trainable=False) rpn_model.load_weights('{}/rpn_weights.h5'.format(output_path), by_name=True) # detection_model.load_weights('{}/detection_weights.h5'.format(output_path), by_name=True) rpn_model.save('{}/rpn_model'.format(output_path)) # detection_model.save('{}/detection_model'.format(output_path)) converter = tf.lite.TFLiteConverter.from_saved_model('{}/rpn_model'.format(output_path)) converter.experimental_new_converter", "np from datetime import datetime import matplotlib.pyplot as plt import tensorflow as tf", "= params['ishape'] ssize = params['ssize'] max_num_of_rois = params['max_num_of_rois'] unified_roi_size = params['unified_roi_size'] rpn_head_dim =", "ssize = params['ssize'] max_num_of_rois = params['max_num_of_rois'] unified_roi_size = params['unified_roi_size'] rpn_head_dim = params['rpn_head_dim'] fc_denses", "params['nsm_score_threshold'] classes = params['classes'] output_path = params['output_path'] abox4d = genanchors(isize=ishape[:2], ssize=ssize, asizes=asizes) anchor_4dtensor", "unified_roi_size = params['unified_roi_size'] rpn_head_dim = params['rpn_head_dim'] fc_denses = params['fc_denses'] block_settings = params['resnet'] nsm_iou_threshold", "= tf.constant(value=abox4d, dtype='float32') rpn_model, detection_model = build_inference_maskrcnn_non_fpn( ishape=ishape, anchor_4dtensor=anchor_4dtensor, classes=classes, max_num_of_rois=max_num_of_rois, nsm_iou_threshold=nsm_iou_threshold, nsm_score_threshold=nsm_score_threshold,", "[tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS] tflite_model = converter.convert() open('{}/rpn_model.tflite'.format(output_path), 'wb').write(tflite_model) # converter = tf.lite.TFLiteConverter.from_saved_model('{}/detection_model'.format(output_path)) # converter.experimental_new_converter", "rpn_model, detection_model = build_inference_maskrcnn_non_fpn( ishape=ishape, anchor_4dtensor=anchor_4dtensor, classes=classes, 
max_num_of_rois=max_num_of_rois, nsm_iou_threshold=nsm_iou_threshold, nsm_score_threshold=nsm_score_threshold, unified_roi_size=unified_roi_size, rpn_head_dim=rpn_head_dim, fc_denses=fc_denses,", "= params['max_num_of_rois'] unified_roi_size = params['unified_roi_size'] rpn_head_dim = params['rpn_head_dim'] fc_denses = params['fc_denses'] block_settings =", "'wb').write(tflite_model) # converter = tf.lite.TFLiteConverter.from_saved_model('{}/detection_model'.format(output_path)) # converter.experimental_new_converter = True # converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS,", "detection_model = build_inference_maskrcnn_non_fpn( ishape=ishape, anchor_4dtensor=anchor_4dtensor, classes=classes, max_num_of_rois=max_num_of_rois, nsm_iou_threshold=nsm_iou_threshold, nsm_score_threshold=nsm_score_threshold, unified_roi_size=unified_roi_size, rpn_head_dim=rpn_head_dim, fc_denses=fc_denses, block_settings=block_settings,", "import COCO from matplotlib.patches import Rectangle from models import build_inference_maskrcnn_non_fpn from utils import", "settings params = settings('non-fpn-inference') asizes = params['asizes'] ishape = params['ishape'] ssize = params['ssize']", "from pycocotools.coco import COCO from matplotlib.patches import Rectangle from models import build_inference_maskrcnn_non_fpn from", "genanchors, box2frame from settings import settings params = settings('non-fpn-inference') asizes = params['asizes'] ishape", "params['rpn_head_dim'] fc_denses = params['fc_denses'] block_settings = params['resnet'] nsm_iou_threshold = params['nsm_iou_threshold'] nsm_score_threshold = params['nsm_score_threshold']", "import genanchors, box2frame from settings import settings params = settings('non-fpn-inference') asizes = params['asizes']", "tf.constant(value=abox4d, dtype='float32') rpn_model, detection_model = build_inference_maskrcnn_non_fpn( ishape=ishape, anchor_4dtensor=anchor_4dtensor, classes=classes, max_num_of_rois=max_num_of_rois, nsm_iou_threshold=nsm_iou_threshold, nsm_score_threshold=nsm_score_threshold, unified_roi_size=unified_roi_size,", "classes=classes, max_num_of_rois=max_num_of_rois, nsm_iou_threshold=nsm_iou_threshold, nsm_score_threshold=nsm_score_threshold, unified_roi_size=unified_roi_size, rpn_head_dim=rpn_head_dim, fc_denses=fc_denses, block_settings=block_settings, base_block_trainable=False) rpn_model.load_weights('{}/rpn_weights.h5'.format(output_path), by_name=True) # detection_model.load_weights('{}/detection_weights.h5'.format(output_path),", "plt import tensorflow as tf from pycocotools.coco import COCO from matplotlib.patches import Rectangle", "import matplotlib.pyplot as plt import tensorflow as tf from pycocotools.coco import COCO from", "from settings import settings params = settings('non-fpn-inference') asizes = params['asizes'] ishape = params['ishape']", "by_name=True) rpn_model.save('{}/rpn_model'.format(output_path)) # detection_model.save('{}/detection_model'.format(output_path)) converter = tf.lite.TFLiteConverter.from_saved_model('{}/rpn_model'.format(output_path)) converter.experimental_new_converter = True converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS,", "= params['asizes'] ishape = params['ishape'] ssize = params['ssize'] max_num_of_rois = params['max_num_of_rois'] unified_roi_size =", "converter.convert() open('{}/rpn_model.tflite'.format(output_path), 'wb').write(tflite_model) # converter = tf.lite.TFLiteConverter.from_saved_model('{}/detection_model'.format(output_path)) # 
converter.experimental_new_converter = True # converter.target_spec.supported_ops", "max_num_of_rois=max_num_of_rois, nsm_iou_threshold=nsm_iou_threshold, nsm_score_threshold=nsm_score_threshold, unified_roi_size=unified_roi_size, rpn_head_dim=rpn_head_dim, fc_denses=fc_denses, block_settings=block_settings, base_block_trainable=False) rpn_model.load_weights('{}/rpn_weights.h5'.format(output_path), by_name=True) # detection_model.load_weights('{}/detection_weights.h5'.format(output_path), by_name=True)", "fc_denses = params['fc_denses'] block_settings = params['resnet'] nsm_iou_threshold = params['nsm_iou_threshold'] nsm_score_threshold = params['nsm_score_threshold'] classes", "= params['resnet'] nsm_iou_threshold = params['nsm_iou_threshold'] nsm_score_threshold = params['nsm_score_threshold'] classes = params['classes'] output_path =", "tf from pycocotools.coco import COCO from matplotlib.patches import Rectangle from models import build_inference_maskrcnn_non_fpn", "# converter.experimental_new_converter = True # converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS] # tflite_model = converter.convert()", "nsm_iou_threshold = params['nsm_iou_threshold'] nsm_score_threshold = params['nsm_score_threshold'] classes = params['classes'] output_path = params['output_path'] abox4d", "converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS] tflite_model = converter.convert() open('{}/rpn_model.tflite'.format(output_path), 'wb').write(tflite_model) # converter = tf.lite.TFLiteConverter.from_saved_model('{}/detection_model'.format(output_path))", "tflite_model = converter.convert() open('{}/rpn_model.tflite'.format(output_path), 'wb').write(tflite_model) # converter = tf.lite.TFLiteConverter.from_saved_model('{}/detection_model'.format(output_path)) # converter.experimental_new_converter = True", "from models import build_inference_maskrcnn_non_fpn from utils import genanchors, box2frame from settings import settings", "# converter = tf.lite.TFLiteConverter.from_saved_model('{}/detection_model'.format(output_path)) # converter.experimental_new_converter = True # converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS]", "converter = tf.lite.TFLiteConverter.from_saved_model('{}/detection_model'.format(output_path)) # converter.experimental_new_converter = True # converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS] #", "# detection_model.load_weights('{}/detection_weights.h5'.format(output_path), by_name=True) rpn_model.save('{}/rpn_model'.format(output_path)) # detection_model.save('{}/detection_model'.format(output_path)) converter = tf.lite.TFLiteConverter.from_saved_model('{}/rpn_model'.format(output_path)) converter.experimental_new_converter = True converter.target_spec.supported_ops", "ishape=ishape, anchor_4dtensor=anchor_4dtensor, classes=classes, max_num_of_rois=max_num_of_rois, nsm_iou_threshold=nsm_iou_threshold, nsm_score_threshold=nsm_score_threshold, unified_roi_size=unified_roi_size, rpn_head_dim=rpn_head_dim, fc_denses=fc_denses, block_settings=block_settings, base_block_trainable=False) rpn_model.load_weights('{}/rpn_weights.h5'.format(output_path), by_name=True)", "params['classes'] output_path = params['output_path'] abox4d = genanchors(isize=ishape[:2], ssize=ssize, asizes=asizes) anchor_4dtensor = tf.constant(value=abox4d, dtype='float32')", "pycocotools.coco import COCO from matplotlib.patches 
import Rectangle from models import build_inference_maskrcnn_non_fpn from utils", "max_num_of_rois = params['max_num_of_rois'] unified_roi_size = params['unified_roi_size'] rpn_head_dim = params['rpn_head_dim'] fc_denses = params['fc_denses'] block_settings", "tf.lite.TFLiteConverter.from_saved_model('{}/rpn_model'.format(output_path)) converter.experimental_new_converter = True converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS] tflite_model = converter.convert() open('{}/rpn_model.tflite'.format(output_path), 'wb').write(tflite_model)", "import tensorflow as tf from pycocotools.coco import COCO from matplotlib.patches import Rectangle from", "nsm_score_threshold=nsm_score_threshold, unified_roi_size=unified_roi_size, rpn_head_dim=rpn_head_dim, fc_denses=fc_denses, block_settings=block_settings, base_block_trainable=False) rpn_model.load_weights('{}/rpn_weights.h5'.format(output_path), by_name=True) # detection_model.load_weights('{}/detection_weights.h5'.format(output_path), by_name=True) rpn_model.save('{}/rpn_model'.format(output_path)) #", "from datetime import datetime import matplotlib.pyplot as plt import tensorflow as tf from", "dtype='float32') rpn_model, detection_model = build_inference_maskrcnn_non_fpn( ishape=ishape, anchor_4dtensor=anchor_4dtensor, classes=classes, max_num_of_rois=max_num_of_rois, nsm_iou_threshold=nsm_iou_threshold, nsm_score_threshold=nsm_score_threshold, unified_roi_size=unified_roi_size, rpn_head_dim=rpn_head_dim,", "build_inference_maskrcnn_non_fpn from utils import genanchors, box2frame from settings import settings params = settings('non-fpn-inference')", "settings import settings params = settings('non-fpn-inference') asizes = params['asizes'] ishape = params['ishape'] ssize", "= params['ssize'] max_num_of_rois = params['max_num_of_rois'] unified_roi_size = params['unified_roi_size'] rpn_head_dim = params['rpn_head_dim'] fc_denses =", "block_settings = params['resnet'] nsm_iou_threshold = params['nsm_iou_threshold'] nsm_score_threshold = params['nsm_score_threshold'] classes = params['classes'] output_path", "numpy as np from datetime import datetime import matplotlib.pyplot as plt import tensorflow", "matplotlib.pyplot as plt import tensorflow as tf from pycocotools.coco import COCO from matplotlib.patches", "matplotlib.patches import Rectangle from models import build_inference_maskrcnn_non_fpn from utils import genanchors, box2frame from", "as plt import tensorflow as tf from pycocotools.coco import COCO from matplotlib.patches import", "params['ishape'] ssize = params['ssize'] max_num_of_rois = params['max_num_of_rois'] unified_roi_size = params['unified_roi_size'] rpn_head_dim = params['rpn_head_dim']", "= settings('non-fpn-inference') asizes = params['asizes'] ishape = params['ishape'] ssize = params['ssize'] max_num_of_rois =", "= params['fc_denses'] block_settings = params['resnet'] nsm_iou_threshold = params['nsm_iou_threshold'] nsm_score_threshold = params['nsm_score_threshold'] classes =", "asizes=asizes) anchor_4dtensor = tf.constant(value=abox4d, dtype='float32') rpn_model, detection_model = build_inference_maskrcnn_non_fpn( ishape=ishape, anchor_4dtensor=anchor_4dtensor, classes=classes, max_num_of_rois=max_num_of_rois," ]
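Once rpn_model.tflite has been written, it can be sanity-checked with the TFLite interpreter. A minimal sketch, assuming the file sits in the current directory and feeding a zero image of whatever shape the converted model expects (because the conversion enables SELECT_TF_OPS, running it needs the Flex ops that ship with the full tensorflow package):

import numpy as np
import tensorflow as tf

# Path to the converted RPN model is an assumption; adjust to output_path.
interpreter = tf.lite.Interpreter(model_path='rpn_model.tflite')
interpreter.allocate_tensors()

input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()

# Dummy input with the shape/dtype the model was exported with (e.g. 1 x H x W x 3).
x = np.zeros(input_details[0]['shape'], dtype=input_details[0]['dtype'])
interpreter.set_tensor(input_details[0]['index'], x)
interpreter.invoke()

# First output tensor of the RPN graph (ROIs/scores, depending on the model definition).
out = interpreter.get_tensor(output_details[0]['index'])
print(out.shape)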
[ "RegularGridInterpolator from scipy.ndimage.filters import gaussian_filter \"\"\" Elastic deformation of images as described in", "not much!\" shape = img_numpy.shape # Define 3D coordinate system coords = np.arange(shape[0]),", "assert img_numpy.ndim == 3 , 'Wrong img shape, provide 3D img' if labels", "from scipy.ndimage.filters import gaussian_filter \"\"\" Elastic deformation of images as described in <NAME>,", "alpha dy = gaussian_filter((np.random.rand(*shape) * 2 - 1), sigma, mode=\"constant\", cval=0.) * alpha", "is not None: lab_intrp = RegularGridInterpolator(coords, labels, method=\"nearest\", bounds_error=False, fill_value=0) labels = lab_intrp(indices).reshape(shape).astype(labels.dtype)", "Recognition, 2003. Modified from: https://gist.github.com/chsasank/4d8f68caf01f041a6453e67fb30f8f5a https://github.com/fcalvet/image_tools/blob/master/image_augmentation.py#L62 Modified to take 3D inputs Deforms both", ", \"Shapes of img and label do not much!\" shape = img_numpy.shape #", "2003. Modified from: https://gist.github.com/chsasank/4d8f68caf01f041a6453e67fb30f8f5a https://github.com/fcalvet/image_tools/blob/master/image_augmentation.py#L62 Modified to take 3D inputs Deforms both the", "1)) # Interpolate 3D image image img_numpy = im_intrps(indices).reshape(shape) # Interpolate labels if", "assert img_numpy.shape == labels.shape , \"Shapes of img and label do not much!\"", "scipy.ndimage.filters import gaussian_filter \"\"\" Elastic deformation of images as described in <NAME>, \"Best", ", 'Wrong img shape, provide 3D img' if labels is not None: assert", "are interpolated via nearest neighbour \"\"\" def elastic_transform_3d(img_numpy, labels=None, alpha=1, sigma=20, c_val=0.0, method=\"linear\"):", "from: https://gist.github.com/chsasank/4d8f68caf01f041a6453e67fb30f8f5a https://github.com/fcalvet/image_tools/blob/master/image_augmentation.py#L62 Modified to take 3D inputs Deforms both the image and", "import RegularGridInterpolator from scipy.ndimage.filters import gaussian_filter \"\"\" Elastic deformation of images as described", "- 1), sigma, mode=\"constant\", cval=0.) * alpha dy = gaussian_filter((np.random.rand(*shape) * 2 -", "volumes are interpolated via nearest neighbour \"\"\" def elastic_transform_3d(img_numpy, labels=None, alpha=1, sigma=20, c_val=0.0,", "y, z = np.mgrid[0:shape[0], 0:shape[1], 0:shape[2]] indices = np.reshape(x + dx, (-1, 1)),", "img im_intrps = RegularGridInterpolator(coords, img_numpy, method=method, bounds_error=False, fill_value=c_val) # Get random elastic deformations", "= img_numpy.shape # Define 3D coordinate system coords = np.arange(shape[0]), np.arange(shape[1]), np.arange(shape[2]) #", "+ dy, (-1, 1)), \\ np.reshape(z + dz, (-1, 1)) # Interpolate 3D", "1), sigma, mode=\"constant\", cval=0.) * alpha dz = gaussian_filter((np.random.rand(*shape) * 2 - 1),", "is not None: assert img_numpy.shape == labels.shape , \"Shapes of img and label", "of the International Conference on Document Analysis and Recognition, 2003. Modified from: https://gist.github.com/chsasank/4d8f68caf01f041a6453e67fb30f8f5a", "image labels :param alpha: scaling factor of gaussian filter :param sigma: standard deviation", "img shape, provide 3D img' if labels is not None: assert img_numpy.shape ==", "dx = gaussian_filter((np.random.rand(*shape) * 2 - 1), sigma, mode=\"constant\", cval=0.) 
* alpha dy", "Define 3D coordinate system coords = np.arange(shape[0]), np.arange(shape[1]), np.arange(shape[2]) # Interpolated img im_intrps", "img_numpy: 3D medical image modality :param labels: 3D medical image labels :param alpha:", "RegularGridInterpolator(coords, img_numpy, method=method, bounds_error=False, fill_value=c_val) # Get random elastic deformations dx = gaussian_filter((np.random.rand(*shape)", "Elastic deformation of images as described in <NAME>, \"Best Practices for Convolutional Neural", "\"Shapes of img and label do not much!\" shape = img_numpy.shape # Define", "labels is not None: assert img_numpy.shape == labels.shape , \"Shapes of img and", "def elastic_transform_3d(img_numpy, labels=None, alpha=1, sigma=20, c_val=0.0, method=\"linear\"): \"\"\" :param img_numpy: 3D medical image", "as np from scipy.interpolate import RegularGridInterpolator from scipy.ndimage.filters import gaussian_filter \"\"\" Elastic deformation", "and Recognition, 2003. Modified from: https://gist.github.com/chsasank/4d8f68caf01f041a6453e67fb30f8f5a https://github.com/fcalvet/image_tools/blob/master/image_augmentation.py#L62 Modified to take 3D inputs Deforms", "img_numpy.shape == labels.shape , \"Shapes of img and label do not much!\" shape", "np.arange(shape[0]), np.arange(shape[1]), np.arange(shape[2]) # Interpolated img im_intrps = RegularGridInterpolator(coords, img_numpy, method=method, bounds_error=False, fill_value=c_val)", "gaussian_filter((np.random.rand(*shape) * 2 - 1), sigma, mode=\"constant\", cval=0.) * alpha dy = gaussian_filter((np.random.rand(*shape)", "1)), \\ np.reshape(z + dz, (-1, 1)) # Interpolate 3D image image img_numpy", "image img_numpy = im_intrps(indices).reshape(shape) # Interpolate labels if labels is not None: lab_intrp", "\\ np.reshape(z + dz, (-1, 1)) # Interpolate 3D image image img_numpy =", "labels=None, alpha=1, sigma=20, c_val=0.0, method=\"linear\"): \"\"\" :param img_numpy: 3D medical image modality :param", "Modified to take 3D inputs Deforms both the image and corresponding label file", "* alpha # Define sample points x, y, z = np.mgrid[0:shape[0], 0:shape[1], 0:shape[2]]", "== labels.shape , \"Shapes of img and label do not much!\" shape =", ": (\"linear\", \"nearest\") :return: deformed image and/or label \"\"\" assert img_numpy.ndim == 3", "of gaussian filter :param sigma: standard deviation of random gaussian filter :param c_val:", "import numpy as np from scipy.interpolate import RegularGridInterpolator from scipy.ndimage.filters import gaussian_filter \"\"\"", "im_intrps = RegularGridInterpolator(coords, img_numpy, method=method, bounds_error=False, fill_value=c_val) # Get random elastic deformations dx", "if labels is not None: lab_intrp = RegularGridInterpolator(coords, labels, method=\"nearest\", bounds_error=False, fill_value=0) labels", "filter :param c_val: fill value :param method: interpolation method. 
supported methods : (\"linear\",", "labels :param alpha: scaling factor of gaussian filter :param sigma: standard deviation of", "'Wrong img shape, provide 3D img' if labels is not None: assert img_numpy.shape", "Interpolate labels if labels is not None: lab_intrp = RegularGridInterpolator(coords, labels, method=\"nearest\", bounds_error=False,", "inputs Deforms both the image and corresponding label file Label volumes are interpolated", "(-1, 1)), \\ np.reshape(z + dz, (-1, 1)) # Interpolate 3D image image", "from scipy.interpolate import RegularGridInterpolator from scipy.ndimage.filters import gaussian_filter \"\"\" Elastic deformation of images", "factor of gaussian filter :param sigma: standard deviation of random gaussian filter :param", "None: lab_intrp = RegularGridInterpolator(coords, labels, method=\"nearest\", bounds_error=False, fill_value=0) labels = lab_intrp(indices).reshape(shape).astype(labels.dtype) return img_numpy,", ":param c_val: fill value :param method: interpolation method. supported methods : (\"linear\", \"nearest\")", "Neural Networks applied to Visual Document Analysis\", in Proc. of the International Conference", "bounds_error=False, fill_value=c_val) # Get random elastic deformations dx = gaussian_filter((np.random.rand(*shape) * 2 -", "labels is not None: lab_intrp = RegularGridInterpolator(coords, labels, method=\"nearest\", bounds_error=False, fill_value=0) labels =", "via nearest neighbour \"\"\" def elastic_transform_3d(img_numpy, labels=None, alpha=1, sigma=20, c_val=0.0, method=\"linear\"): \"\"\" :param", "3D image image img_numpy = im_intrps(indices).reshape(shape) # Interpolate labels if labels is not", "and/or label \"\"\" assert img_numpy.ndim == 3 , 'Wrong img shape, provide 3D", ":param method: interpolation method. supported methods : (\"linear\", \"nearest\") :return: deformed image and/or", "c_val=0.0, method=\"linear\"): \"\"\" :param img_numpy: 3D medical image modality :param labels: 3D medical", "elastic deformations dx = gaussian_filter((np.random.rand(*shape) * 2 - 1), sigma, mode=\"constant\", cval=0.) *", "alpha # Define sample points x, y, z = np.mgrid[0:shape[0], 0:shape[1], 0:shape[2]] indices", "on Document Analysis and Recognition, 2003. Modified from: https://gist.github.com/chsasank/4d8f68caf01f041a6453e67fb30f8f5a https://github.com/fcalvet/image_tools/blob/master/image_augmentation.py#L62 Modified to take", "3D medical image modality :param labels: 3D medical image labels :param alpha: scaling", "dy = gaussian_filter((np.random.rand(*shape) * 2 - 1), sigma, mode=\"constant\", cval=0.) * alpha dz", "numpy as np from scipy.interpolate import RegularGridInterpolator from scipy.ndimage.filters import gaussian_filter \"\"\" Elastic", "3D coordinate system coords = np.arange(shape[0]), np.arange(shape[1]), np.arange(shape[2]) # Interpolated img im_intrps =", "for Convolutional Neural Networks applied to Visual Document Analysis\", in Proc. of the", "filter :param sigma: standard deviation of random gaussian filter :param c_val: fill value", "img_numpy, method=method, bounds_error=False, fill_value=c_val) # Get random elastic deformations dx = gaussian_filter((np.random.rand(*shape) *", "+ dx, (-1, 1)), \\ np.reshape(y + dy, (-1, 1)), \\ np.reshape(z +", "\"\"\" Elastic deformation of images as described in <NAME>, \"Best Practices for Convolutional", "cval=0.) 
* alpha dy = gaussian_filter((np.random.rand(*shape) * 2 - 1), sigma, mode=\"constant\", cval=0.)", "\"Best Practices for Convolutional Neural Networks applied to Visual Document Analysis\", in Proc.", "random gaussian filter :param c_val: fill value :param method: interpolation method. supported methods", "fill value :param method: interpolation method. supported methods : (\"linear\", \"nearest\") :return: deformed", "2 - 1), sigma, mode=\"constant\", cval=0.) * alpha dy = gaussian_filter((np.random.rand(*shape) * 2", "file Label volumes are interpolated via nearest neighbour \"\"\" def elastic_transform_3d(img_numpy, labels=None, alpha=1,", "images as described in <NAME>, \"Best Practices for Convolutional Neural Networks applied to", "= np.mgrid[0:shape[0], 0:shape[1], 0:shape[2]] indices = np.reshape(x + dx, (-1, 1)), \\ np.reshape(y", "0:shape[1], 0:shape[2]] indices = np.reshape(x + dx, (-1, 1)), \\ np.reshape(y + dy,", ":param alpha: scaling factor of gaussian filter :param sigma: standard deviation of random", "image image img_numpy = im_intrps(indices).reshape(shape) # Interpolate labels if labels is not None:", "coordinate system coords = np.arange(shape[0]), np.arange(shape[1]), np.arange(shape[2]) # Interpolated img im_intrps = RegularGridInterpolator(coords,", "2 - 1), sigma, mode=\"constant\", cval=0.) * alpha # Define sample points x,", "much!\" shape = img_numpy.shape # Define 3D coordinate system coords = np.arange(shape[0]), np.arange(shape[1]),", "\"\"\" assert img_numpy.ndim == 3 , 'Wrong img shape, provide 3D img' if", "shape, provide 3D img' if labels is not None: assert img_numpy.shape == labels.shape", "if labels is not None: assert img_numpy.shape == labels.shape , \"Shapes of img", "scipy.interpolate import RegularGridInterpolator from scipy.ndimage.filters import gaussian_filter \"\"\" Elastic deformation of images as", "elastic_transform_3d(img_numpy, labels=None, alpha=1, sigma=20, c_val=0.0, method=\"linear\"): \"\"\" :param img_numpy: 3D medical image modality", "value :param method: interpolation method. supported methods : (\"linear\", \"nearest\") :return: deformed image", "2 - 1), sigma, mode=\"constant\", cval=0.) * alpha dz = gaussian_filter((np.random.rand(*shape) * 2", "method: interpolation method. supported methods : (\"linear\", \"nearest\") :return: deformed image and/or label", "# Define sample points x, y, z = np.mgrid[0:shape[0], 0:shape[1], 0:shape[2]] indices =", "corresponding label file Label volumes are interpolated via nearest neighbour \"\"\" def elastic_transform_3d(img_numpy,", "np.reshape(x + dx, (-1, 1)), \\ np.reshape(y + dy, (-1, 1)), \\ np.reshape(z", "dy, (-1, 1)), \\ np.reshape(z + dz, (-1, 1)) # Interpolate 3D image", "img_numpy = im_intrps(indices).reshape(shape) # Interpolate labels if labels is not None: lab_intrp =", "gaussian filter :param c_val: fill value :param method: interpolation method. 
supported methods :", "described in <NAME>, \"Best Practices for Convolutional Neural Networks applied to Visual Document", "np.arange(shape[1]), np.arange(shape[2]) # Interpolated img im_intrps = RegularGridInterpolator(coords, img_numpy, method=method, bounds_error=False, fill_value=c_val) #", "Get random elastic deformations dx = gaussian_filter((np.random.rand(*shape) * 2 - 1), sigma, mode=\"constant\",", "1)), \\ np.reshape(y + dy, (-1, 1)), \\ np.reshape(z + dz, (-1, 1))", "supported methods : (\"linear\", \"nearest\") :return: deformed image and/or label \"\"\" assert img_numpy.ndim", "(\"linear\", \"nearest\") :return: deformed image and/or label \"\"\" assert img_numpy.ndim == 3 ,", "== 3 , 'Wrong img shape, provide 3D img' if labels is not", "points x, y, z = np.mgrid[0:shape[0], 0:shape[1], 0:shape[2]] indices = np.reshape(x + dx,", "* 2 - 1), sigma, mode=\"constant\", cval=0.) * alpha dy = gaussian_filter((np.random.rand(*shape) *", "(-1, 1)) # Interpolate 3D image image img_numpy = im_intrps(indices).reshape(shape) # Interpolate labels", "lab_intrp = RegularGridInterpolator(coords, labels, method=\"nearest\", bounds_error=False, fill_value=0) labels = lab_intrp(indices).reshape(shape).astype(labels.dtype) return img_numpy, labels", "to Visual Document Analysis\", in Proc. of the International Conference on Document Analysis", "import gaussian_filter \"\"\" Elastic deformation of images as described in <NAME>, \"Best Practices", "of images as described in <NAME>, \"Best Practices for Convolutional Neural Networks applied", "nearest neighbour \"\"\" def elastic_transform_3d(img_numpy, labels=None, alpha=1, sigma=20, c_val=0.0, method=\"linear\"): \"\"\" :param img_numpy:", "np.reshape(z + dz, (-1, 1)) # Interpolate 3D image image img_numpy = im_intrps(indices).reshape(shape)", "im_intrps(indices).reshape(shape) # Interpolate labels if labels is not None: lab_intrp = RegularGridInterpolator(coords, labels,", "not None: lab_intrp = RegularGridInterpolator(coords, labels, method=\"nearest\", bounds_error=False, fill_value=0) labels = lab_intrp(indices).reshape(shape).astype(labels.dtype) return", "sigma, mode=\"constant\", cval=0.) * alpha dz = gaussian_filter((np.random.rand(*shape) * 2 - 1), sigma,", "mode=\"constant\", cval=0.) * alpha # Define sample points x, y, z = np.mgrid[0:shape[0],", "cval=0.) * alpha dz = gaussian_filter((np.random.rand(*shape) * 2 - 1), sigma, mode=\"constant\", cval=0.)", "Document Analysis\", in Proc. of the International Conference on Document Analysis and Recognition,", "labels if labels is not None: lab_intrp = RegularGridInterpolator(coords, labels, method=\"nearest\", bounds_error=False, fill_value=0)", "the image and corresponding label file Label volumes are interpolated via nearest neighbour", "z = np.mgrid[0:shape[0], 0:shape[1], 0:shape[2]] indices = np.reshape(x + dx, (-1, 1)), \\", "Analysis and Recognition, 2003. Modified from: https://gist.github.com/chsasank/4d8f68caf01f041a6453e67fb30f8f5a https://github.com/fcalvet/image_tools/blob/master/image_augmentation.py#L62 Modified to take 3D inputs", "= gaussian_filter((np.random.rand(*shape) * 2 - 1), sigma, mode=\"constant\", cval=0.) * alpha dz =", "both the image and corresponding label file Label volumes are interpolated via nearest", "\"\"\" def elastic_transform_3d(img_numpy, labels=None, alpha=1, sigma=20, c_val=0.0, method=\"linear\"): \"\"\" :param img_numpy: 3D medical", "dz = gaussian_filter((np.random.rand(*shape) * 2 - 1), sigma, mode=\"constant\", cval=0.) 
* alpha #", "gaussian_filter((np.random.rand(*shape) * 2 - 1), sigma, mode=\"constant\", cval=0.) * alpha # Define sample", "label \"\"\" assert img_numpy.ndim == 3 , 'Wrong img shape, provide 3D img'", "sigma=20, c_val=0.0, method=\"linear\"): \"\"\" :param img_numpy: 3D medical image modality :param labels: 3D", "gaussian filter :param sigma: standard deviation of random gaussian filter :param c_val: fill", "interpolation method. supported methods : (\"linear\", \"nearest\") :return: deformed image and/or label \"\"\"", "sample points x, y, z = np.mgrid[0:shape[0], 0:shape[1], 0:shape[2]] indices = np.reshape(x +", "+ dz, (-1, 1)) # Interpolate 3D image image img_numpy = im_intrps(indices).reshape(shape) #", "\\ np.reshape(y + dy, (-1, 1)), \\ np.reshape(z + dz, (-1, 1)) #", "alpha dz = gaussian_filter((np.random.rand(*shape) * 2 - 1), sigma, mode=\"constant\", cval=0.) * alpha", "# Define 3D coordinate system coords = np.arange(shape[0]), np.arange(shape[1]), np.arange(shape[2]) # Interpolated img", "3D inputs Deforms both the image and corresponding label file Label volumes are", "* alpha dz = gaussian_filter((np.random.rand(*shape) * 2 - 1), sigma, mode=\"constant\", cval=0.) *", "dx, (-1, 1)), \\ np.reshape(y + dy, (-1, 1)), \\ np.reshape(z + dz,", "(-1, 1)), \\ np.reshape(y + dy, (-1, 1)), \\ np.reshape(z + dz, (-1,", "# Interpolate 3D image image img_numpy = im_intrps(indices).reshape(shape) # Interpolate labels if labels", "= np.reshape(x + dx, (-1, 1)), \\ np.reshape(y + dy, (-1, 1)), \\", "x, y, z = np.mgrid[0:shape[0], 0:shape[1], 0:shape[2]] indices = np.reshape(x + dx, (-1,", "Visual Document Analysis\", in Proc. of the International Conference on Document Analysis and", "None: assert img_numpy.shape == labels.shape , \"Shapes of img and label do not", "scaling factor of gaussian filter :param sigma: standard deviation of random gaussian filter", "alpha=1, sigma=20, c_val=0.0, method=\"linear\"): \"\"\" :param img_numpy: 3D medical image modality :param labels:", "= RegularGridInterpolator(coords, labels, method=\"nearest\", bounds_error=False, fill_value=0) labels = lab_intrp(indices).reshape(shape).astype(labels.dtype) return img_numpy, labels return", "Convolutional Neural Networks applied to Visual Document Analysis\", in Proc. of the International", "image and corresponding label file Label volumes are interpolated via nearest neighbour \"\"\"", "\"nearest\") :return: deformed image and/or label \"\"\" assert img_numpy.ndim == 3 , 'Wrong", "Label volumes are interpolated via nearest neighbour \"\"\" def elastic_transform_3d(img_numpy, labels=None, alpha=1, sigma=20,", "img_numpy.shape # Define 3D coordinate system coords = np.arange(shape[0]), np.arange(shape[1]), np.arange(shape[2]) # Interpolated", "* alpha dy = gaussian_filter((np.random.rand(*shape) * 2 - 1), sigma, mode=\"constant\", cval=0.) *", "# Interpolate labels if labels is not None: lab_intrp = RegularGridInterpolator(coords, labels, method=\"nearest\",", ":return: deformed image and/or label \"\"\" assert img_numpy.ndim == 3 , 'Wrong img", "and label do not much!\" shape = img_numpy.shape # Define 3D coordinate system", "deformations dx = gaussian_filter((np.random.rand(*shape) * 2 - 1), sigma, mode=\"constant\", cval=0.) 
import numpy as np
from scipy.interpolate import RegularGridInterpolator
from scipy.ndimage.filters import gaussian_filter

"""
Elastic deformation of images as described in
<NAME>, "Best Practices for Convolutional Neural Networks applied to Visual
Document Analysis", in Proc. of the International Conference on Document
Analysis and Recognition, 2003.

Modified from:
https://gist.github.com/chsasank/4d8f68caf01f041a6453e67fb30f8f5a
https://github.com/fcalvet/image_tools/blob/master/image_augmentation.py#L62

Modified to take 3D inputs.
Deforms both the image and the corresponding label file.
Label volumes are interpolated via nearest neighbour.
"""


def elastic_transform_3d(img_numpy, labels=None, alpha=1, sigma=20, c_val=0.0, method="linear"):
    """
    :param img_numpy: 3D medical image modality
    :param labels: 3D medical image labels
    :param alpha: scaling factor of gaussian filter
    :param sigma: standard deviation of random gaussian filter
    :param c_val: fill value
    :param method: interpolation method. supported methods: ("linear", "nearest")
    :return: deformed image and/or label
    """
    assert img_numpy.ndim == 3, 'Wrong img shape, provide 3D img'
    if labels is not None:
        assert img_numpy.shape == labels.shape, "Shapes of img and label do not match!"
    shape = img_numpy.shape

    # Define 3D coordinate system
    coords = np.arange(shape[0]), np.arange(shape[1]), np.arange(shape[2])

    # Interpolated img
    im_intrps = RegularGridInterpolator(coords, img_numpy,
                                        method=method,
                                        bounds_error=False,
                                        fill_value=c_val)

    # Get random elastic deformations
    dx = gaussian_filter((np.random.rand(*shape) * 2 - 1), sigma,
                         mode="constant", cval=0.) * alpha
    dy = gaussian_filter((np.random.rand(*shape) * 2 - 1), sigma,
                         mode="constant", cval=0.) * alpha
    dz = gaussian_filter((np.random.rand(*shape) * 2 - 1), sigma,
                         mode="constant", cval=0.) * alpha

    # Define sample points
    x, y, z = np.mgrid[0:shape[0], 0:shape[1], 0:shape[2]]
    indices = np.reshape(x + dx, (-1, 1)), \
              np.reshape(y + dy, (-1, 1)), \
              np.reshape(z + dz, (-1, 1))

    # Interpolate 3D image
    img_numpy = im_intrps(indices).reshape(shape)

    # Interpolate labels
    if labels is not None:
        lab_intrp = RegularGridInterpolator(coords, labels,
                                            method="nearest",
                                            bounds_error=False,
                                            fill_value=0)
        labels = lab_intrp(indices).reshape(shape).astype(labels.dtype)
        return img_numpy, labels

    return img_numpy
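A minimal usage sketch of elastic_transform_3d as reconstructed above; the toy volume, the synthetic segmentation, and the alpha/sigma values are illustrative assumptions, not part of the original module.

import numpy as np

# Hypothetical toy inputs: a random 3D "scan" and a binary label volume.
img = np.random.rand(32, 32, 32).astype(np.float32)
seg = (img > 0.5).astype(np.uint8)

# Deform image and segmentation together; parameter values are illustrative only.
img_def, seg_def = elastic_transform_3d(img, labels=seg, alpha=1, sigma=4)
assert img_def.shape == img.shape
assert seg_def.dtype == seg.dtype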
# Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""General helper functions."""
from os import path

import numpy as np
from skimage import measure
import tensorflow.compat.v1 as tf

from tensorflow_graphics.projects.cvxnet.lib.libmise import mise
from tensorflow_graphics.projects.nasa.lib import datasets
from tensorflow_graphics.projects.nasa.lib import models
import tensorflow_probability as tfp
from tqdm import trange
import trimesh

tf.disable_eager_execution()
tfd = tfp.distributions


def define_flags():
  """Define command line flags."""
  flags = tf.app.flags

  # Dataset Parameters
  flags.DEFINE_enum("dataset", "amass",
                    list(k for k in datasets.dataset_dict.keys()),
                    "Name of the dataset.")
  flags.DEFINE_string("data_dir", None, "Directory to load data from.")
  flags.mark_flag_as_required("data_dir")
  flags.DEFINE_integer("sample_bbox", 1024, "Number of bbox samples.")
  flags.DEFINE_integer("sample_surf", 1024, "Number of surface samples.")
  flags.DEFINE_integer("batch_size", 12, "Batch size.")
  flags.DEFINE_integer("motion", 0, "Index of the motion for evaluation.")
  flags.DEFINE_integer("subject", 0, "Index of the subject for training.")

  # Model Parameters
  flags.DEFINE_enum("model", "nasa", list(k for k in models.model_dict.keys()),
                    "Name of the model.")
  flags.DEFINE_integer("n_parts", 24, "Number of parts.")
  flags.DEFINE_integer("total_dim", 960,
                       "Dimension of the latent vector (in total).")
  flags.DEFINE_bool("shared_decoder", False, "Whether to use shared decoder.")
  flags.DEFINE_float("soft_blend", 5., "The constant to blend parts.")
  flags.DEFINE_bool("projection", True,
                    "Whether to use projected shape features.")
  flags.DEFINE_float("level_set", 0.5, "The value of the level_set.")
  flags.DEFINE_integer("n_dims", 3, "The dimension of the query points.")

  # Training Parameters
  flags.DEFINE_float("lr", 1e-4, "Learning rate")
  flags.DEFINE_string("train_dir", None, "Training directory.")
  flags.mark_flag_as_required("train_dir")
  flags.DEFINE_integer("max_steps", 200000, "Number of optimization steps.")
  flags.DEFINE_integer("save_every", 5000,
                       "Number of steps to save checkpoint.")
  flags.DEFINE_integer("summary_every", 500,
                       "Number of steps to save summaries.")
  flags.DEFINE_float("label_w", 0.5, "Weight of labeled vertices loss.")
  flags.DEFINE_float("minimal_w", 0.05, "Weight of minimal loss.")
  flags.DEFINE_bool("use_vert", True,
                    "Whether to use vertices on the mesh for training.")
  flags.DEFINE_bool("use_joint", True,
                    "Whether to use joint-based transformation.")
  flags.DEFINE_integer("sample_vert", 2048, "Number of vertex samples.")

  # Evaluation Parameters
  flags.DEFINE_bool("gen_mesh_only", False, "Whether to generate meshes only.")

  # Tracking Parameters
  flags.DEFINE_float("theta_lr", 5e-4, "Learning rate")
  flags.DEFINE_integer("max_steps_per_frame", 1792,
                       "Number of optimization steps for tracking each frame.")
  flags.DEFINE_enum("gradient_type", "reparam", ["vanilla", "reparam"],
                    "Type of gradient to use in theta optimization.")
  flags.DEFINE_integer("sample_track_vert", 1024,
                       "Number of vertex samples for tracking each frame.")
  flags.DEFINE_integer("n_noisy_samples", 8,
                       "Number of noisy samples per vertex")
  flags.DEFINE_float("bandwidth", 1e-2, "Bandwidth of the gaussian noises.")
  flags.DEFINE_bool(
      "left_trans", False,
      "Whether to use left side transformation (True) or right side (False).")
  flags.DEFINE_string("joint_data", None, "Path to load joint data.")
  flags.DEFINE_float("glue_w", 20., "Weight of length constraint loss.")
  flags.DEFINE_float("trans_range", 1., "The range of allowed translations.")


def gen_mesh(sess,
             feed_dict,
             latent_holder,
             point_holder,
             latent,
             occ,
             batch_val,
             hparams,
             idx=0):
  """Generating meshes given a trained NASA model."""
  scale = 1.1  # Scale of the padded bbox regarding the tight one.
  level_set = hparams.level_set
  latent_val = sess.run(latent, feed_dict)
  mesh_extractor = mise.MISE(32, 3, level_set)
  points = mesh_extractor.query()
  gt_verts = batch_val["vert"].reshape([-1, 3])
  gt_bbox = np.stack([gt_verts.min(axis=0), gt_verts.max(axis=0)], axis=0)
  gt_center = (gt_bbox[0] + gt_bbox[1]) * 0.5
  gt_scale = (gt_bbox[1] - gt_bbox[0]).max()

  while points.shape[0] != 0:
    orig_points = points
    points = points.astype(np.float32)
    points = (np.expand_dims(points, axis=0) / mesh_extractor.resolution -
              0.5) * scale
    points = points * gt_scale + gt_center
    n_points = points.shape[1]
    values = []
    for i in range(0, n_points, 100000):
      # Add this to prevent OOM due to points overload.
      feed_dict[latent_holder] = latent_val
      feed_dict[point_holder] = np.expand_dims(points[:, i:i + 100000], axis=1)
      value = sess.run(occ[:, idx], feed_dict)
      values.append(value)
    values = np.concatenate(values, axis=1)
    values = values[0, :, 0].astype(np.float64)
    mesh_extractor.update(orig_points, values)
    points = mesh_extractor.query()

  value_grid = mesh_extractor.to_dense()
  try:  # Marching cubes can fail; fall back to None in that case.
    value_grid = np.pad(value_grid, 1, "constant", constant_values=-1e6)
    verts, faces, normals, unused_var = measure.marching_cubes_lewiner(
        value_grid, min(level_set, value_grid.max()))
    del normals
    verts -= 1
    verts /= np.array([
        value_grid.shape[0] - 3, value_grid.shape[1] - 3,
        value_grid.shape[2] - 3
    ],
                      dtype=np.float32)
    verts = scale * (verts - 0.5)
    verts = verts * gt_scale + gt_center
    faces = np.stack([faces[..., 1], faces[..., 0], faces[..., 2]], axis=-1)
    mesh = trimesh.Trimesh(vertices=verts, faces=faces)
    return mesh
  except:  # pylint: disable=bare-except
    return None


def save_mesh(sess,
              feed_dict,
              latent_holder,
              point_holder,
              latent,
              occ,
              batch_val,
              hparams,
              pth="meshes"):
  """Generate and save meshes to disk given a trained NASA model."""
  name = batch_val["name"][0].decode("utf-8")
  subject, motion, frame = amass_name_helper(name)
  pth = path.join(hparams.train_dir, pth, frame)
  if not tf.io.gfile.isdir(pth):
    tf.io.gfile.makedirs(pth)
  start = hparams.n_parts
  for i in range(start, hparams.n_parts + 1):
    mesh_model = gen_mesh(
        sess,
        feed_dict,
        latent_holder,
        point_holder,
        latent,
        occ,
        batch_val,
        hparams,
        idx=i)
    mesh_name = "full_pred.obj"
    if mesh_model is not None:
      with tf.io.gfile.GFile(path.join(pth, mesh_name), "w") as fout:
        mesh_model.export(fout, file_type="obj")
  return subject, motion, frame, mesh_model


def save_pointcloud(data, hparams, pth="pointcloud"):
  """Save pointcloud to disk."""
  name = data["name"][0].decode("utf-8")
  unused_subject, unused_motion, frame = amass_name_helper(name)
  pth = path.join(hparams.train_dir, pth, frame)
  if not tf.io.gfile.isdir(pth):
    tf.io.gfile.makedirs(pth)
  mesh_name = "pointcloud.obj"
  with tf.io.gfile.GFile(path.join(pth, mesh_name), "w") as fout:
    pointcloud = data["vert"].reshape([-1, 3])
    for v in pointcloud:
      fout.write("v {0} {1} {2}\n".format(*v.tolist()))


def amass_name_helper(name):
  name, frame = name.split("-")
  subject = name[:5]
  motion = name[6:]
  return subject, motion, frame


def make_summary_feed_dict(
    iou_hook,
    iou,
    best_hook,
    best_iou,
):
  feed_dict = {}
  feed_dict[iou_hook] = iou
  feed_dict[best_hook] = best_iou
  return feed_dict


def parse_global_step(ckpt):
  basename = path.basename(ckpt)
  return int(basename.split("-")[-1])


def compute_iou(sess, feed_dict, latent_holder, point_holder, latent, occ,
                point, label, hparams):
  """Compute IoU."""
  iou = 0.
  eps = 1e-9
  latent_val = sess.run(latent, feed_dict)
  n_points = point.shape[2]
  preds = []
  for start in range(0, n_points, 100000):
    feed_dict[point_holder] = point[:, :, start:start + 100000]
    feed_dict[latent_holder] = latent_val
    pred = sess.run(occ, feed_dict)
    preds.append(pred)
  pred = np.concatenate(preds, axis=2)
  pred = (pred >= hparams.level_set).astype(np.float32)
  label = (label[:, :1] >= 0.5).astype(np.float32).squeeze(axis=1)
  iou += np.sum(pred * label) / np.maximum(np.sum(np.maximum(pred, label)), eps)
  return iou


def compute_glue_loss(connect, end_pts, inv_transforms, inv_first_frame_trans,
                      joints, hparams):
  """Compute the prior term as a glue loss."""
  n_dims = hparams.n_dims

  # Invert the transformation
  r_inv = inv_transforms[..., :n_dims, :n_dims]
  t_inv = inv_transforms[..., :n_dims, -1:]
  r = tf.transpose(r_inv, [0, 2, 1])
  t = -tf.matmul(r, t_inv)
  transforms = tf.concat(
      [tf.concat([r, t], axis=-1), inv_transforms[..., -1:, :]], axis=-2)
  transforms = tf.matmul(transforms, inv_first_frame_trans)

  # Compute transformations of father joints and apply it to vectors from frame0
  father_transforms = tf.reduce_sum(
      tf.expand_dims(transforms, axis=1) *
      connect.reshape([hparams.n_parts, hparams.n_parts, 1, 1]),
      axis=0)
  end_pts_homo = tf.expand_dims(
      tf.concat([end_pts, tf.ones_like(end_pts[..., :1])], axis=-1), axis=-1)
  end_pts_transformed = tf.matmul(father_transforms, end_pts_homo)
  end_pts_transformed = tf.squeeze(end_pts_transformed, axis=-1)[..., :n_dims]

  # Compute vectors in current configuration
  pred_links = tf.reshape(joints, [hparams.n_parts, n_dims])

  # Compute distance between links and transformed vectors
  return tf.reduce_sum(tf.square(pred_links - end_pts_transformed))


def vanilla_theta_gradient(model_fn, batch_holder, hparams):
  """A vanilla gradient estimator for the pose, theta."""
  latent_holder, latent, occ_eval = model_fn(batch_holder, None, None,
                                             "gen_mesh")
  if hparams.sample_vert > 0:
    points = batch_holder["point"]
    weights = batch_holder["weight"]
    n_vert = tf.shape(points)[2]
    sample_indices = tf.random.uniform([1, 1, hparams.sample_vert],
                                       minval=0,
                                       maxval=n_vert,
                                       dtype=tf.int32)
    points = tf.gather(points, sample_indices, axis=2, batch_dims=2)
    weights = tf.gather(weights, sample_indices, axis=2, batch_dims=2)
    batch_holder["point"] = points
    batch_holder["weight"] = weights

  unused_var0, unused_var1, occ = model_fn(batch_holder, None, None,
                                           "gen_mesh")
  return latent_holder, latent, occ_eval, tf.reduce_mean(
      tf.square(occ - hparams.level_set))


def reparam_theta_gradient(model_fn, batch_holder, hparams):
  """A gradient estimator for the pose, theta, using the reparam trick."""
  sigma = hparams.bandwidth
  n_samples = hparams.n_noisy_samples

  latent_holder, latent, occ_eval = model_fn(batch_holder, None, None,
                                             "gen_mesh")
  if hparams.sample_vert > 0:
    points = batch_holder["point"]
    weights = batch_holder["weight"]
    n_vert = tf.shape(points)[2]
    sample_indices = tf.random.uniform([1, 1, hparams.sample_vert],
                                       minval=0,
                                       maxval=n_vert,
                                       dtype=tf.int32)
    points = tf.gather(points, sample_indices, axis=2, batch_dims=2)
    weights = tf.gather(weights, sample_indices, axis=2, batch_dims=2)
    batch_holder["point"] = points
    batch_holder["weight"] = weights

  dist = tfd.Normal(loc=0., scale=sigma)
  n_pts = hparams.sample_vert if hparams.sample_vert > 0 else hparams.n_vert
  noises = dist.sample((1, hparams.n_parts, n_pts, n_samples, hparams.n_dims))
  unused_var0, unused_var1, occ = model_fn(batch_holder, noises, None,
                                           "gen_mesh")
  occ = tf.reshape(occ, [1, hparams.n_parts + 1, -1, n_samples, 1])
  occ = tf.reduce_mean(occ[:, hparams.n_parts:], axis=3)
  return latent_holder, latent, occ_eval, tf.reduce_mean(
      tf.square(occ - hparams.level_set))


def optimize_theta(feed_dict, loss, reset_op, train_op, rec_loss, glue_loss,
                   sess, k, hparams):
  """Optimize the pose, theta, during tracking."""
  sess.run(reset_op)
  loss_val = 0
  glue_val = 0
  with trange(hparams.max_steps_per_frame) as t:
    for unused_i in t:
      loss_val, unused_var, rec_val, glue_val = sess.run(
          [loss, train_op, rec_loss, glue_loss], feed_dict)
      t.set_description("Frame_{0} {1:.4f}|{2:.4f}".format(
          k, rec_val, glue_val))
  return loss_val, glue_val
loss.\") flags.DEFINE_float(\"trans_range\", 1., \"The range of allowed translations.\") def", "960, \"Dimension of the latent vector (in total).\") flags.DEFINE_bool(\"shared_decoder\", False, \"Whether to use", "# Dataset Parameters flags.DEFINE_enum(\"dataset\", \"amass\", list(k for k in datasets.dataset_dict.keys()), \"Name of the", "values = np.concatenate(values, axis=1) values = values[0, :, 0].astype(np.float64) mesh_extractor.update(orig_points, values) points =", "= tf.squeeze(end_pts_transformed, axis=-1)[..., :n_dims] # Compute vectors in current configuration pred_links = tf.reshape(joints,", "sess, k, hparams): \"\"\"Optimize the pose, theta, during tracking.\"\"\" sess.run(reset_op) loss_val = 0", "verts * gt_scale + gt_center faces = np.stack([faces[..., 1], faces[..., 0], faces[..., 2]],", "(gt_bbox[1] - gt_bbox[0]).max() while points.shape[0] != 0: orig_points = points points = points.astype(np.float32)", "flags.DEFINE_float(\"label_w\", 0.5, \"Weight of labed vertices loss.\") flags.DEFINE_float(\"minimal_w\", 0.05, \"Weight of minimal loss.\")", "save_pointcloud(data, hparams, pth=\"pointcloud\"): \"\"\"Save pointcloud to disk.\"\"\" name = data[\"name\"][0].decode(\"utf-8\") unused_subject, unused_motion, frame", "projected shape features.\") flags.DEFINE_float(\"level_set\", 0.5, \"The value of the level_set.\") flags.DEFINE_integer(\"n_dims\", 3, \"The", "bbox samples.\") flags.DEFINE_integer(\"sample_surf\", 1024, \"Number of surface samples.\") flags.DEFINE_integer(\"batch_size\", 12, \"Batch size.\") flags.DEFINE_integer(\"motion\",", "points = batch_holder[\"point\"] weights = batch_holder[\"weight\"] n_vert = tf.shape(points)[2] sample_indices = tf.random.uniform([1, 1,", "for k in models.model_dict.keys()), \"Name of the model.\") flags.DEFINE_integer(\"n_parts\", 24, \"Number of parts.\")", "1.1 # Scale of the padded bbox regarding the tight one. level_set =", "file except in compliance with the License. 
def define_flags():
  """Define command line flags."""
  flags = tf.app.flags

  # Dataset Parameters
  flags.DEFINE_enum("dataset", "amass",
                    list(k for k in datasets.dataset_dict.keys()),
                    "Name of the dataset.")
  flags.DEFINE_string("data_dir", None, "Directory to load data from.")
  flags.mark_flag_as_required("data_dir")
  flags.DEFINE_integer("sample_bbox", 1024, "Number of bbox samples.")
  flags.DEFINE_integer("sample_surf", 1024, "Number of surface samples.")
  flags.DEFINE_integer("batch_size", 12, "Batch size.")
  flags.DEFINE_integer("motion", 0, "Index of the motion for evaluation.")
  flags.DEFINE_integer("subject", 0, "Index of the subject for training.")

  # Model Parameters
  flags.DEFINE_enum("model", "nasa", list(k for k in models.model_dict.keys()),
                    "Name of the model.")
  flags.DEFINE_integer("n_parts", 24, "Number of parts.")
  flags.DEFINE_integer("total_dim", 960,
                       "Dimension of the latent vector (in total).")
  flags.DEFINE_bool("shared_decoder", False, "Whether to use shared decoder.")
  flags.DEFINE_float("soft_blend", 5., "The constant to blend parts.")
  flags.DEFINE_bool("projection", True,
                    "Whether to use projected shape features.")
  flags.DEFINE_float("level_set", 0.5, "The value of the level_set.")
  flags.DEFINE_integer("n_dims", 3, "The dimension of the query points.")

  # Training Parameters
  flags.DEFINE_float("lr", 1e-4, "Learning rate.")
  flags.DEFINE_string("train_dir", None, "Training directory.")
  flags.mark_flag_as_required("train_dir")
  flags.DEFINE_integer("max_steps", 200000, "Number of optimization steps.")
  flags.DEFINE_integer("save_every", 5000,
                       "Number of steps to save checkpoint.")
  flags.DEFINE_integer("summary_every", 500,
                       "Number of steps to save summaries.")
  flags.DEFINE_float("label_w", 0.5, "Weight of labeled vertices loss.")
  flags.DEFINE_float("minimal_w", 0.05, "Weight of minimal loss.")
  flags.DEFINE_bool("use_vert", True,
                    "Whether to use vertices on the mesh for training.")
  flags.DEFINE_bool("use_joint", True,
                    "Whether to use joint-based transformation.")
  flags.DEFINE_integer("sample_vert", 2048, "Number of vertex samples.")

  # Evaluation Parameters
  flags.DEFINE_bool("gen_mesh_only", False, "Whether to generate meshes only.")

  # Tracking Parameters
  flags.DEFINE_float("theta_lr", 5e-4, "Learning rate.")
  flags.DEFINE_integer("max_steps_per_frame", 1792,
                       "Number of optimization steps for tracking each frame.")
  flags.DEFINE_enum("gradient_type", "reparam", ["vanilla", "reparam"],
                    "Type of gradient to use in theta optimization.")
  flags.DEFINE_integer("sample_track_vert", 1024,
                       "Number of vertex samples for tracking each frame.")
  flags.DEFINE_integer("n_noisy_samples", 8,
                       "Number of noisy samples per vertex.")
  flags.DEFINE_float("bandwidth", 1e-2, "Bandwidth of the Gaussian noises.")
  flags.DEFINE_bool(
      "left_trans", False,
      "Whether to use left side transformation (True) or right side (False).")
  flags.DEFINE_string("joint_data", None, "Path to load joint data.")
  flags.DEFINE_float("glue_w", 20., "Weight of length constraint loss.")
  flags.DEFINE_float("trans_range", 1., "The range of allowed translations.")


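# Illustrative sketch, not part of the original library: the flags defined
# above are typically consumed through `tf.app.flags.FLAGS`, which is only
# populated once `tf.app.run` (or an explicit parse of `sys.argv`) has run.
# The helper below is documentation only and is never called by this module.
def _example_flag_usage():
  define_flags()
  hparams = tf.app.flags.FLAGS
  # With no command-line overrides, hparams.n_parts == 24 and
  # hparams.level_set == 0.5 once the flags have been parsed.
  return hparams

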
def gen_mesh(sess,
             feed_dict,
             latent_holder,
             point_holder,
             latent,
             occ,
             batch_val,
             hparams,
             idx=0):
  """Generating meshes given a trained NASA model."""
  scale = 1.1  # Scale of the padded bbox regarding the tight one.
  level_set = hparams.level_set
  latent_val = sess.run(latent, feed_dict)
  mesh_extractor = mise.MISE(32, 3, level_set)
  points = mesh_extractor.query()
  gt_verts = batch_val["vert"].reshape([-1, 3])
  gt_bbox = np.stack([gt_verts.min(axis=0), gt_verts.max(axis=0)], axis=0)
  gt_center = (gt_bbox[0] + gt_bbox[1]) * 0.5
  gt_scale = (gt_bbox[1] - gt_bbox[0]).max()

  while points.shape[0] != 0:
    orig_points = points
    points = points.astype(np.float32)
    points = (np.expand_dims(points, axis=0) / mesh_extractor.resolution -
              0.5) * scale
    points = points * gt_scale + gt_center
    n_points = points.shape[1]
    values = []
    # Evaluate the occupancy in chunks of 100k points to prevent OOM.
    for i in range(0, n_points, 100000):
      feed_dict[latent_holder] = latent_val
      feed_dict[point_holder] = np.expand_dims(points[:, i:i + 100000], axis=1)
      value = sess.run(occ[:, idx], feed_dict)
      values.append(value)
    values = np.concatenate(values, axis=1)
    values = values[0, :, 0].astype(np.float64)
    mesh_extractor.update(orig_points, values)
    points = mesh_extractor.query()

  value_grid = mesh_extractor.to_dense()
  try:
    value_grid = np.pad(value_grid, 1, "constant", constant_values=-1e6)
    verts, faces, normals, unused_var = measure.marching_cubes_lewiner(
        value_grid, min(level_set, value_grid.max()))
    del normals
    verts -= 1
    verts /= np.array([
        value_grid.shape[0] - 3, value_grid.shape[1] - 3,
        value_grid.shape[2] - 3
    ],
                      dtype=np.float32)
    verts = scale * (verts - 0.5)
    verts = verts * gt_scale + gt_center
    faces = np.stack([faces[..., 1], faces[..., 0], faces[..., 2]], axis=-1)
    mesh = trimesh.Trimesh(vertices=verts, faces=faces)
    return mesh
  except:  # pylint: disable=bare-except
    return None


def save_mesh(sess,
              feed_dict,
              latent_holder,
              point_holder,
              latent,
              occ,
              batch_val,
              hparams,
              pth="meshes"):
  """Generate and save meshes to disk given a trained NASA model."""
  name = batch_val["name"][0].decode("utf-8")
  subject, motion, frame = amass_name_helper(name)
  pth = path.join(hparams.train_dir, pth, frame)
  if not tf.io.gfile.isdir(pth):
    tf.io.gfile.makedirs(pth)
  start = hparams.n_parts
  for i in range(start, hparams.n_parts + 1):
    mesh_model = gen_mesh(
        sess,
        feed_dict,
        latent_holder,
        point_holder,
        latent,
        occ,
        batch_val,
        hparams,
        idx=i)
    mesh_name = "full_pred.obj"
    if mesh_model is not None:
      with tf.io.gfile.GFile(path.join(pth, mesh_name), "w") as fout:
        mesh_model.export(fout, file_type="obj")
  return subject, motion, frame, mesh_model


def save_pointcloud(data, hparams, pth="pointcloud"):
  """Save pointcloud to disk."""
  name = data["name"][0].decode("utf-8")
  unused_subject, unused_motion, frame = amass_name_helper(name)
  pth = path.join(hparams.train_dir, pth, frame)
  if not tf.io.gfile.isdir(pth):
    tf.io.gfile.makedirs(pth)
  mesh_name = "pointcloud.obj"
  with tf.io.gfile.GFile(path.join(pth, mesh_name), "w") as fout:
    pointcloud = data["vert"].reshape([-1, 3])
    for v in pointcloud:
      fout.write("v {0} {1} {2}\n".format(*v.tolist()))


def amass_name_helper(name):
  name, frame = name.split("-")
  subject = name[:5]
  motion = name[6:]
  return subject, motion, frame


def make_summary_feed_dict(
    iou_hook,
    iou,
    best_hook,
    best_iou,
):
  feed_dict = {}
  feed_dict[iou_hook] = iou
  feed_dict[best_hook] = best_iou
  return feed_dict


def parse_global_step(ckpt):
  basename = path.basename(ckpt)
  return int(basename.split("-")[-1])


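# Illustrative sketch, not part of the original library: `amass_name_helper`
# assumes sample names of the form "<5-char subject>_<motion>-<frame>". With a
# hypothetical name the split works out as below; documentation only.
def _example_amass_name():
  subject, motion, frame = amass_name_helper("50002_jumping_jacks-000123")
  # subject == "50002", motion == "jumping_jacks", frame == "000123"
  return subject, motion, frame

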
def compute_iou(sess, feed_dict, latent_holder, point_holder, latent, occ,
                point, label, hparams):
  """Compute IoU."""
  iou = 0.
  eps = 1e-9
  latent_val = sess.run(latent, feed_dict)
  n_points = point.shape[2]
  preds = []
  # Evaluate the occupancy in chunks of 100k points to prevent OOM.
  for start in range(0, n_points, 100000):
    feed_dict[point_holder] = point[:, :, start:start + 100000]
    feed_dict[latent_holder] = latent_val
    pred = sess.run(occ, feed_dict)
    preds.append(pred)
  pred = np.concatenate(preds, axis=2)
  pred = (pred >= hparams.level_set).astype(np.float32)
  label = (label[:, :1] >= 0.5).astype(np.float32).squeeze(axis=1)
  iou += np.sum(pred * label) / np.maximum(np.sum(np.maximum(pred, label)), eps)
  return iou


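# Illustrative sketch, not part of the original library: on binarised
# occupancies the expression above is |pred AND label| / |pred OR label|.
# Documentation only.
def _example_iou():
  pred = np.array([1., 1., 0., 0.])
  label = np.array([1., 0., 1., 0.])
  # One voxel in the intersection, three in the union, so the IoU is 1/3.
  return np.sum(pred * label) / np.maximum(np.sum(np.maximum(pred, label)), 1e-9)

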
def compute_glue_loss(connect, end_pts, inv_transforms, inv_first_frame_trans,
                      joints, hparams):
  """Compute the prior term as a glue loss."""
  n_dims = hparams.n_dims

  # Invert the transformation.
  r_inv = inv_transforms[..., :n_dims, :n_dims]
  t_inv = inv_transforms[..., :n_dims, -1:]
  r = tf.transpose(r_inv, [0, 2, 1])
  t = -tf.matmul(r, t_inv)
  transforms = tf.concat(
      [tf.concat([r, t], axis=-1), inv_transforms[..., -1:, :]], axis=-2)
  transforms = tf.matmul(transforms, inv_first_frame_trans)

  # Compute transformations of father joints and apply them to vectors
  # from frame0.
  father_transforms = tf.reduce_sum(
      tf.expand_dims(transforms, axis=1) *
      connect.reshape([hparams.n_parts, hparams.n_parts, 1, 1]),
      axis=0)
  end_pts_homo = tf.expand_dims(
      tf.concat([end_pts, tf.ones_like(end_pts[..., :1])], axis=-1), axis=-1)
  end_pts_transformed = tf.matmul(father_transforms, end_pts_homo)
  end_pts_transformed = tf.squeeze(end_pts_transformed, axis=-1)[..., :n_dims]

  # Compute vectors in the current configuration.
  pred_links = tf.reshape(joints, [hparams.n_parts, n_dims])

  # Compute the distance between links and transformed vectors.
  return tf.reduce_sum(tf.square(pred_links - end_pts_transformed))


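# Illustrative sketch, not part of the original library: the inversion above
# relies on the rigid-transform identity inv([R|t]) = [R^T | -R^T t] in
# homogeneous form, which this toy numpy check illustrates. Documentation only.
def _example_rigid_inverse():
  theta = 0.3
  r = np.array([[np.cos(theta), -np.sin(theta)],
                [np.sin(theta), np.cos(theta)]])
  t = np.array([[1.0], [2.0]])
  m = np.block([[r, t], [np.zeros((1, 2)), np.ones((1, 1))]])
  m_inv = np.block([[r.T, -r.T @ t], [np.zeros((1, 2)), np.ones((1, 1))]])
  return np.allclose(m @ m_inv, np.eye(3))  # True

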
def vanilla_theta_gradient(model_fn, batch_holder, hparams):
  """A vanilla gradient estimator for the pose, theta."""
  latent_holder, latent, occ_eval = model_fn(batch_holder, None, None,
                                             "gen_mesh")
  if hparams.sample_vert > 0:
    points = batch_holder["point"]
    weights = batch_holder["weight"]
    n_vert = tf.shape(points)[2]
    sample_indices = tf.random.uniform([1, 1, hparams.sample_vert],
                                       minval=0,
                                       maxval=n_vert,
                                       dtype=tf.int32)
    points = tf.gather(points, sample_indices, axis=2, batch_dims=2)
    weights = tf.gather(weights, sample_indices, axis=2, batch_dims=2)
    batch_holder["point"] = points
    batch_holder["weight"] = weights

  unused_var0, unused_var1, occ = model_fn(batch_holder, None, None, "gen_mesh")
  return latent_holder, latent, occ_eval, tf.reduce_mean(
      tf.square(occ - hparams.level_set))


def reparam_theta_gradient(model_fn, batch_holder, hparams):
  """A gradient estimator for the pose, theta, using the reparam trick."""
  sigma = hparams.bandwidth
  n_samples = hparams.n_noisy_samples

  latent_holder, latent, occ_eval = model_fn(batch_holder, None, None,
                                             "gen_mesh")
  if hparams.sample_vert > 0:
    points = batch_holder["point"]
    weights = batch_holder["weight"]
    n_vert = tf.shape(points)[2]
    sample_indices = tf.random.uniform([1, 1, hparams.sample_vert],
                                       minval=0,
                                       maxval=n_vert,
                                       dtype=tf.int32)
    points = tf.gather(points, sample_indices, axis=2, batch_dims=2)
    weights = tf.gather(weights, sample_indices, axis=2, batch_dims=2)
    batch_holder["point"] = points
    batch_holder["weight"] = weights

  dist = tfd.Normal(loc=0., scale=sigma)
  n_pts = hparams.sample_vert if hparams.sample_vert > 0 else hparams.n_vert
  noises = dist.sample((1, hparams.n_parts, n_pts, n_samples, hparams.n_dims))
  unused_var0, unused_var1, occ = model_fn(batch_holder, noises, None,
                                           "gen_mesh")
  occ = tf.reshape(occ, [1, hparams.n_parts + 1, -1, n_samples, 1])
  occ = tf.reduce_mean(occ[:, hparams.n_parts:], axis=3)
  return latent_holder, latent, occ_eval, tf.reduce_mean(
      tf.square(occ - hparams.level_set))


\"\"\"General helper", "in models.model_dict.keys()), \"Name of the model.\") flags.DEFINE_integer(\"n_parts\", 24, \"Number of parts.\") flags.DEFINE_integer(\"total_dim\", 960,", "flags.DEFINE_bool( \"left_trans\", False, \"Whether to use left side transformation (True) or right side", "1024, \"Number of surface samples.\") flags.DEFINE_integer(\"batch_size\", 12, \"Batch size.\") flags.DEFINE_integer(\"motion\", 0, \"Index of", "trained NASA model.\"\"\" scale = 1.1 # Scale of the padded bbox regarding", "due to points overload. feed_dict[latent_holder] = latent_val feed_dict[point_holder] = np.expand_dims(points[:, i:i + 100000],", "mesh_name = \"full_pred.obj\" if mesh_model is not None: with tf.io.gfile.GFile(path.join(pth, mesh_name), \"w\") as", "\"\"\"Save pointcloud to disk.\"\"\" name = data[\"name\"][0].decode(\"utf-8\") unused_subject, unused_motion, frame = amass_name_helper(name) pth", "may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # #", "def parse_global_step(ckpt): basename = path.basename(ckpt) return int(basename.split(\"-\")[-1]) def compute_iou(sess, feed_dict, latent_holder, point_holder, latent,", "left side transformation (True) or right side (False).\") flags.DEFINE_string(\"joint_data\", None, \"Path to load", "# Add this to prevent OOM due to points overload. feed_dict[latent_holder] = latent_val", "# Compute vectors in current configuration pred_links = tf.reshape(joints, [hparams.n_parts, n_dims]) # Compute", "hparams): \"\"\"A vanilla gradient estimator for the pose, theta.\"\"\" latent_holder, latent, occ_eval =", "axis=2, batch_dims=2) batch_holder[\"point\"] = points batch_holder[\"weight\"] = weights unused_var0, unused_var1, occ = model_fn(batch_holder,", "occ_eval, tf.reduce_mean( tf.square(occ - hparams.level_set)) def optimize_theta(feed_dict, loss, reset_op, train_op, rec_loss, glue_loss, sess,", "trange(hparams.max_steps_per_frame) as t: for unused_i in t: loss_val, unused_var, rec_val, glue_val = sess.run(", "def compute_glue_loss(connect, end_pts, inv_transforms, inv_first_frame_trans, joints, hparams): \"\"\"Compute the prior term as a", "np from skimage import measure import tensorflow.compat.v1 as tf from tensorflow_graphics.projects.cvxnet.lib.libmise import mise", "0.5) verts = verts * gt_scale + gt_center faces = np.stack([faces[..., 1], faces[...,", ":, 0].astype(np.float64) mesh_extractor.update(orig_points, values) points = mesh_extractor.query() value_grid = mesh_extractor.to_dense() try: value_grid =", "side transformation (True) or right side (False).\") flags.DEFINE_string(\"joint_data\", None, \"Path to load joint", "= [] for start in range(0, n_points, 100000): feed_dict[point_holder] = point[:, :, start:start", "\"Learning rate\") flags.DEFINE_integer(\"max_steps_per_frame\", 1792, \"Number of optimization steps for tracking each frame.\") flags.DEFINE_enum(\"gradient_type\",", "tf.disable_eager_execution() tfd = tfp.distributions def define_flags(): \"\"\"Define command line flags.\"\"\" flags = tf.app.flags", "law or agreed to in writing, software # distributed under the License is", "n_points = points.shape[1] values = [] for i in range(0, n_points, 100000): #", "the License for the specific language governing permissions and # limitations under the", "1): mesh_model = gen_mesh( sess, feed_dict, latent_holder, point_holder, latent, occ, batch_val, hparams, idx=i)", "of the latent vector (in total).\") flags.DEFINE_bool(\"shared_decoder\", False, \"Whether to use shared decoder.\")", "-1:, 
:]], axis=-2) transforms = tf.matmul(transforms, inv_first_frame_trans) # Compute transformations of father joints", "flags.DEFINE_string(\"data_dir\", None, \"Directory to load data from.\") flags.mark_flag_as_required(\"data_dir\") flags.DEFINE_integer(\"sample_bbox\", 1024, \"Number of bbox", "Authors # # Licensed under the Apache License, Version 2.0 (the \"License\"); #", "gt_center = (gt_bbox[0] + gt_bbox[1]) * 0.5 gt_scale = (gt_bbox[1] - gt_bbox[0]).max() while", "with tf.io.gfile.GFile(path.join(pth, mesh_name), \"w\") as fout: mesh_model.export(fout, file_type=\"obj\") return subject, motion, frame, mesh_model", "overload. feed_dict[latent_holder] = latent_val feed_dict[point_holder] = np.expand_dims(points[:, i:i + 100000], axis=1) value =", "latent_holder, latent, occ_eval, tf.reduce_mean( tf.square(occ - hparams.level_set)) def reparam_theta_gradient(model_fn, batch_holder, hparams): \"\"\"A gradient", "size.\") flags.DEFINE_integer(\"motion\", 0, \"Index of the motion for evaluation.\") flags.DEFINE_integer(\"subject\", 0, \"Index of", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "feed_dict) preds.append(pred) pred = np.concatenate(preds, axis=2) pred = (pred >= hparams.level_set).astype(np.float32) label =", "Parameters flags.DEFINE_enum(\"model\", \"nasa\", list(k for k in models.model_dict.keys()), \"Name of the model.\") flags.DEFINE_integer(\"n_parts\",", "i:i + 100000], axis=1) value = sess.run(occ[:, idx], feed_dict) values.append(value) values = np.concatenate(values,", "tf.io.gfile.makedirs(pth) start = hparams.n_parts for i in range(start, hparams.n_parts + 1): mesh_model =", "father joints and apply it to vectors from frame0 father_transforms = tf.reduce_sum( tf.expand_dims(transforms,", "value of the level_set.\") flags.DEFINE_integer(\"n_dims\", 3, \"The dimension of the query points.\") #", "def gen_mesh(sess, feed_dict, latent_holder, point_holder, latent, occ, batch_val, hparams, idx=0): \"\"\"Generating meshes given", "np.concatenate(values, axis=1) values = values[0, :, 0].astype(np.float64) mesh_extractor.update(orig_points, values) points = mesh_extractor.query() value_grid", "pred = (pred >= hparams.level_set).astype(np.float32) label = (label[:, :1] >= 0.5).astype(np.float32).squeeze(axis=1) iou +=", "hparams.sample_vert > 0 else hparams.n_vert noises = dist.sample((1, hparams.n_parts, n_pts, n_samples, hparams.n_dims)) unused_var0,", "and # limitations under the License. \"\"\"General helper functions.\"\"\" from os import path", "level_set = hparams.level_set latent_val = sess.run(latent, feed_dict) mesh_extractor = mise.MISE(32, 3, level_set) points", "NASA model.\"\"\" scale = 1.1 # Scale of the padded bbox regarding the", "trange import trimesh tf.disable_eager_execution() tfd = tfp.distributions def define_flags(): \"\"\"Define command line flags.\"\"\"", "3 ], dtype=np.float32) verts = scale * (verts - 0.5) verts = verts", "language governing permissions and # limitations under the License. 
\"\"\"General helper functions.\"\"\" from", "gradient estimator for the pose, theta.\"\"\" latent_holder, latent, occ_eval = model_fn(batch_holder, None, None,", "tf.reduce_sum(tf.square(pred_links - end_pts_transformed)) def vanilla_theta_gradient(model_fn, batch_holder, hparams): \"\"\"A vanilla gradient estimator for the", "measure.marching_cubes_lewiner( value_grid, min(level_set, value_grid.max())) del normals verts -= 1 verts /= np.array([ value_grid.shape[0]", "load data from.\") flags.mark_flag_as_required(\"data_dir\") flags.DEFINE_integer(\"sample_bbox\", 1024, \"Number of bbox samples.\") flags.DEFINE_integer(\"sample_surf\", 1024, \"Number", "# Evalulation Parameters flags.DEFINE_bool(\"gen_mesh_only\", False, \"Whether to generate meshes only.\") # Tracking Parameters", "flags.DEFINE_integer(\"sample_track_vert\", 1024, \"Number of vertex samples for tracking each frame.\") flags.DEFINE_integer(\"n_noisy_samples\", 8, \"Number", "from tensorflow_graphics.projects.nasa.lib import datasets from tensorflow_graphics.projects.nasa.lib import models import tensorflow_probability as tfp from", "+ 1): mesh_model = gen_mesh( sess, feed_dict, latent_holder, point_holder, latent, occ, batch_val, hparams,", "= tf.matmul(transforms, inv_first_frame_trans) # Compute transformations of father joints and apply it to", "gt_center n_points = points.shape[1] values = [] for i in range(0, n_points, 100000):", "gradient estimaor for the pose, theta, using the reparam trick.\"\"\" sigma = hparams.bandwidth", "prevent OOM due to points overload. feed_dict[latent_holder] = latent_val feed_dict[point_holder] = np.expand_dims(points[:, i:i", "datasets.dataset_dict.keys()), \"Name of the dataset.\") flags.DEFINE_string(\"data_dir\", None, \"Directory to load data from.\") flags.mark_flag_as_required(\"data_dir\")", "subject for training.\") # Model Parameters flags.DEFINE_enum(\"model\", \"nasa\", list(k for k in models.model_dict.keys()),", "flags.DEFINE_integer(\"n_dims\", 3, \"The dimension of the query points.\") # Training Parameters flags.DEFINE_float(\"lr\", 1e-4,", "to use vertices on the mesh for training.\") flags.DEFINE_bool(\"use_joint\", True, \"Whether to use", "steps for tracking each frame.\") flags.DEFINE_enum(\"gradient_type\", \"reparam\", [\"vanilla\", \"reparam\"], \"Type of gradient to", "eps) return iou def compute_glue_loss(connect, end_pts, inv_transforms, inv_first_frame_trans, joints, hparams): \"\"\"Compute the prior", "prior term as a glue loss.\"\"\" n_dims = hparams.n_dims # Invert the transformation", "# Compute transformations of father joints and apply it to vectors from frame0", "latent, occ, batch_val, hparams, idx=0): \"\"\"Generating meshes given a trained NASA model.\"\"\" scale", "* connect.reshape([hparams.n_parts, hparams.n_parts, 1, 1]), axis=0) end_pts_homo = tf.expand_dims( tf.concat([end_pts, tf.ones_like(end_pts[..., :1])], axis=-1),", "tf.gather(weights, sample_indices, axis=2, batch_dims=2) batch_holder[\"point\"] = points batch_holder[\"weight\"] = weights unused_var0, unused_var1, occ", "compute_iou(sess, feed_dict, latent_holder, point_holder, latent, occ, point, label, hparams): \"\"\"Compute IoU.\"\"\" iou =", "in compliance with the License. # You may obtain a copy of the", "as fout: mesh_model.export(fout, file_type=\"obj\") return subject, motion, frame, mesh_model def save_pointcloud(data, hparams, pth=\"pointcloud\"):", "hparams): \"\"\"Compute IoU.\"\"\" iou = 0. 
eps = 1e-9 latent_val = sess.run(latent, feed_dict)", "The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the", "\"\"\"A gradient estimaor for the pose, theta, using the reparam trick.\"\"\" sigma =", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #", "loss.\"\"\" n_dims = hparams.n_dims # Invert the transformation r_inv = inv_transforms[..., :n_dims, :n_dims]", "surface samples.\") flags.DEFINE_integer(\"batch_size\", 12, \"Batch size.\") flags.DEFINE_integer(\"motion\", 0, \"Index of the motion for", "each frame.\") flags.DEFINE_enum(\"gradient_type\", \"reparam\", [\"vanilla\", \"reparam\"], \"Type of gradient to use in theta", "vertex\") flags.DEFINE_float(\"bandwidth\", 1e-2, \"Bandwidth of the gaussian noises.\") flags.DEFINE_bool( \"left_trans\", False, \"Whether to", "blend parts.\") flags.DEFINE_bool(\"projection\", True, \"Whether to use projected shape features.\") flags.DEFINE_float(\"level_set\", 0.5, \"The", "given a trained NASA model.\"\"\" scale = 1.1 # Scale of the padded", "= hparams.level_set latent_val = sess.run(latent, feed_dict) mesh_extractor = mise.MISE(32, 3, level_set) points =", "= values[0, :, 0].astype(np.float64) mesh_extractor.update(orig_points, values) points = mesh_extractor.query() value_grid = mesh_extractor.to_dense() try:", "name = batch_val[\"name\"][0].decode(\"utf-8\") subject, motion, frame = amass_name_helper(name) pth = path.join(hparams.train_dir, pth, frame)", "of the padded bbox regarding the tight one. level_set = hparams.level_set latent_val =", "[\"vanilla\", \"reparam\"], \"Type of gradient to use in theta optimization.\") flags.DEFINE_integer(\"sample_track_vert\", 1024, \"Number", "of the motion for evaluation.\") flags.DEFINE_integer(\"subject\", 0, \"Index of the subject for training.\")", "Scale of the padded bbox regarding the tight one. 
level_set = hparams.level_set latent_val", "a trained NASA model.\"\"\" name = batch_val[\"name\"][0].decode(\"utf-8\") subject, motion, frame = amass_name_helper(name) pth", "+ 100000] feed_dict[latent_holder] = latent_val pred = sess.run(occ, feed_dict) preds.append(pred) pred = np.concatenate(preds,", "= np.concatenate(preds, axis=2) pred = (pred >= hparams.level_set).astype(np.float32) label = (label[:, :1] >=", "occ = model_fn(batch_holder, noises, None, \"gen_mesh\") occ = tf.reshape(occ, [1, hparams.n_parts + 1,", ":, start:start + 100000] feed_dict[latent_holder] = latent_val pred = sess.run(occ, feed_dict) preds.append(pred) pred", "return feed_dict def parse_global_step(ckpt): basename = path.basename(ckpt) return int(basename.split(\"-\")[-1]) def compute_iou(sess, feed_dict, latent_holder,", "mesh = trimesh.Trimesh(vertices=verts, faces=faces) return mesh except: # pylint: disable=bare-except return None def", "best_iou, ): feed_dict = {} feed_dict[iou_hook] = iou feed_dict[best_hook] = best_iou return feed_dict", "def optimize_theta(feed_dict, loss, reset_op, train_op, rec_loss, glue_loss, sess, k, hparams): \"\"\"Optimize the pose,", "of optimization steps for tracking each frame.\") flags.DEFINE_enum(\"gradient_type\", \"reparam\", [\"vanilla\", \"reparam\"], \"Type of", "dtype=tf.int32) points = tf.gather(points, sample_indices, axis=2, batch_dims=2) weights = tf.gather(weights, sample_indices, axis=2, batch_dims=2)", "See the License for the specific language governing permissions and # limitations under", "2048, \"Number of vertex samples.\") # Evalulation Parameters flags.DEFINE_bool(\"gen_mesh_only\", False, \"Whether to generate", "\"\"\"Generating meshes given a trained NASA model.\"\"\" scale = 1.1 # Scale of", "level_set) points = mesh_extractor.query() gt_verts = batch_val[\"vert\"].reshape([-1, 3]) gt_bbox = np.stack([gt_verts.min(axis=0), gt_verts.max(axis=0)], axis=0)", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "import measure import tensorflow.compat.v1 as tf from tensorflow_graphics.projects.cvxnet.lib.libmise import mise from tensorflow_graphics.projects.nasa.lib import", "\"constant\", constant_values=-1e6) verts, faces, normals, unused_var = measure.marching_cubes_lewiner( value_grid, min(level_set, value_grid.max())) del normals", "* gt_scale + gt_center faces = np.stack([faces[..., 1], faces[..., 0], faces[..., 2]], axis=-1)", "the query points.\") # Training Parameters flags.DEFINE_float(\"lr\", 1e-4, \"Learning rate\") flags.DEFINE_string(\"train_dir\", None, \"Training", "to load joint data.\") flags.DEFINE_float(\"glue_w\", 20., \"Weight of length constraint loss.\") flags.DEFINE_float(\"trans_range\", 1.,", "None: with tf.io.gfile.GFile(path.join(pth, mesh_name), \"w\") as fout: mesh_model.export(fout, file_type=\"obj\") return subject, motion, frame,", "sample_indices, axis=2, batch_dims=2) weights = tf.gather(weights, sample_indices, axis=2, batch_dims=2) batch_holder[\"point\"] = points batch_holder[\"weight\"]", "0], faces[..., 2]], axis=-1) mesh = trimesh.Trimesh(vertices=verts, faces=faces) return mesh except: # pylint:", "reset_op, train_op, rec_loss, glue_loss, sess, k, hparams): \"\"\"Optimize the pose, theta, during tracking.\"\"\"", "mise.MISE(32, 3, level_set) points = mesh_extractor.query() gt_verts = batch_val[\"vert\"].reshape([-1, 3]) gt_bbox = np.stack([gt_verts.min(axis=0),", "from frame0 father_transforms = tf.reduce_sum( tf.expand_dims(transforms, axis=1) * 
connect.reshape([hparams.n_parts, hparams.n_parts, 1, 1]), axis=0)", "0: orig_points = points points = points.astype(np.float32) points = (np.expand_dims(points, axis=0) / mesh_extractor.resolution", "True, \"Whether to use projected shape features.\") flags.DEFINE_float(\"level_set\", 0.5, \"The value of the", "[] for i in range(0, n_points, 100000): # Add this to prevent OOM", "path.join(hparams.train_dir, pth, frame) if not tf.io.gfile.isdir(pth): tf.io.gfile.makedirs(pth) mesh_name = \"pointcloud.obj\" with tf.io.gfile.GFile(path.join(pth, mesh_name),", "pred = sess.run(occ, feed_dict) preds.append(pred) pred = np.concatenate(preds, axis=2) pred = (pred >=", "mesh_extractor.query() value_grid = mesh_extractor.to_dense() try: value_grid = np.pad(value_grid, 1, \"constant\", constant_values=-1e6) verts, faces,", "the transformation r_inv = inv_transforms[..., :n_dims, :n_dims] t_inv = inv_transforms[..., :n_dims, -1:] r", "\"gen_mesh\") if hparams.sample_vert > 0: points = batch_holder[\"point\"] weights = batch_holder[\"weight\"] n_vert =", "\"\"\"General helper functions.\"\"\" from os import path import numpy as np from skimage", "tqdm import trange import trimesh tf.disable_eager_execution() tfd = tfp.distributions def define_flags(): \"\"\"Define command", "del normals verts -= 1 verts /= np.array([ value_grid.shape[0] - 3, value_grid.shape[1] -", "[loss, train_op, rec_loss, glue_loss], feed_dict) t.set_description(\"Frame_{0} {1:.4f}|{2:.4f}\".format( k, rec_val, glue_val)) return loss_val, glue_val", "if hparams.sample_vert > 0 else hparams.n_vert noises = dist.sample((1, hparams.n_parts, n_pts, n_samples, hparams.n_dims))", "tf.concat([end_pts, tf.ones_like(end_pts[..., :1])], axis=-1), axis=-1) end_pts_transformed = tf.matmul(father_transforms, end_pts_homo) end_pts_transformed = tf.squeeze(end_pts_transformed, axis=-1)[...,", "or right side (False).\") flags.DEFINE_string(\"joint_data\", None, \"Path to load joint data.\") flags.DEFINE_float(\"glue_w\", 20.,", "of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "mesh_extractor = mise.MISE(32, 3, level_set) points = mesh_extractor.query() gt_verts = batch_val[\"vert\"].reshape([-1, 3]) gt_bbox", "verts, faces, normals, unused_var = measure.marching_cubes_lewiner( value_grid, min(level_set, value_grid.max())) del normals verts -=", "datasets from tensorflow_graphics.projects.nasa.lib import models import tensorflow_probability as tfp from tqdm import trange", "- 0.5) verts = verts * gt_scale + gt_center faces = np.stack([faces[..., 1],", "specific language governing permissions and # limitations under the License. 
\"\"\"General helper functions.\"\"\"", "disk given a trained NASA model.\"\"\" name = batch_val[\"name\"][0].decode(\"utf-8\") subject, motion, frame =", "of vertex samples.\") # Evalulation Parameters flags.DEFINE_bool(\"gen_mesh_only\", False, \"Whether to generate meshes only.\")", "range(start, hparams.n_parts + 1): mesh_model = gen_mesh( sess, feed_dict, latent_holder, point_holder, latent, occ,", "hparams, idx=i) mesh_name = \"full_pred.obj\" if mesh_model is not None: with tf.io.gfile.GFile(path.join(pth, mesh_name),", "frame, mesh_model def save_pointcloud(data, hparams, pth=\"pointcloud\"): \"\"\"Save pointcloud to disk.\"\"\" name = data[\"name\"][0].decode(\"utf-8\")", "gt_verts.max(axis=0)], axis=0) gt_center = (gt_bbox[0] + gt_bbox[1]) * 0.5 gt_scale = (gt_bbox[1] -", "points = points * gt_scale + gt_center n_points = points.shape[1] values = []", "path import numpy as np from skimage import measure import tensorflow.compat.v1 as tf", "the motion for evaluation.\") flags.DEFINE_integer(\"subject\", 0, \"Index of the subject for training.\") #", "file_type=\"obj\") return subject, motion, frame, mesh_model def save_pointcloud(data, hparams, pth=\"pointcloud\"): \"\"\"Save pointcloud to", "the reparam trick.\"\"\" sigma = hparams.bandwidth n_samples = hparams.n_noisy_samples latent_holder, latent, occ_eval =", "{} feed_dict[iou_hook] = iou feed_dict[best_hook] = best_iou return feed_dict def parse_global_step(ckpt): basename =", "optimize_theta(feed_dict, loss, reset_op, train_op, rec_loss, glue_loss, sess, k, hparams): \"\"\"Optimize the pose, theta,", "during tracking.\"\"\" sess.run(reset_op) loss_val = 0 glue_val = 0 with trange(hparams.max_steps_per_frame) as t:", "the pose, theta.\"\"\" latent_holder, latent, occ_eval = model_fn(batch_holder, None, None, \"gen_mesh\") if hparams.sample_vert", "label) / np.maximum(np.sum(np.maximum(pred, label)), eps) return iou def compute_glue_loss(connect, end_pts, inv_transforms, inv_first_frame_trans, joints,", "iou = 0. 
eps = 1e-9 latent_val = sess.run(latent, feed_dict) n_points = point.shape[2]", "# Compute distance between links and transformed vectors return tf.reduce_sum(tf.square(pred_links - end_pts_transformed)) def", "inv_transforms, inv_first_frame_trans, joints, hparams): \"\"\"Compute the prior term as a glue loss.\"\"\" n_dims", "estimaor for the pose, theta, using the reparam trick.\"\"\" sigma = hparams.bandwidth n_samples", "tfd.Normal(loc=0., scale=sigma) n_pts = hparams.sample_vert if hparams.sample_vert > 0 else hparams.n_vert noises =", "vertex samples for tracking each frame.\") flags.DEFINE_integer(\"n_noisy_samples\", 8, \"Number of noisy samples per", "training.\") flags.DEFINE_bool(\"use_joint\", True, \"Whether to use joint-based transformation.\") flags.DEFINE_integer(\"sample_vert\", 2048, \"Number of vertex", "latent vector (in total).\") flags.DEFINE_bool(\"shared_decoder\", False, \"Whether to use shared decoder.\") flags.DEFINE_float(\"soft_blend\", 5.,", "- 3 ], dtype=np.float32) verts = scale * (verts - 0.5) verts =", "motion = name[6:] return subject, motion, frame def make_summary_feed_dict( iou_hook, iou, best_hook, best_iou,", "hparams): \"\"\"Compute the prior term as a glue loss.\"\"\" n_dims = hparams.n_dims #", "parts.\") flags.DEFINE_integer(\"total_dim\", 960, \"Dimension of the latent vector (in total).\") flags.DEFINE_bool(\"shared_decoder\", False, \"Whether", "Version 2.0 (the \"License\"); # you may not use this file except in", "hparams.n_dims # Invert the transformation r_inv = inv_transforms[..., :n_dims, :n_dims] t_inv = inv_transforms[...,", "= weights dist = tfd.Normal(loc=0., scale=sigma) n_pts = hparams.sample_vert if hparams.sample_vert > 0", "except in compliance with the License. # You may obtain a copy of", "as np from skimage import measure import tensorflow.compat.v1 as tf from tensorflow_graphics.projects.cvxnet.lib.libmise import", "= 0. eps = 1e-9 latent_val = sess.run(latent, feed_dict) n_points = point.shape[2] preds", "use left side transformation (True) or right side (False).\") flags.DEFINE_string(\"joint_data\", None, \"Path to", "model.\"\"\" name = batch_val[\"name\"][0].decode(\"utf-8\") subject, motion, frame = amass_name_helper(name) pth = path.join(hparams.train_dir, pth,", "frame.\") flags.DEFINE_integer(\"n_noisy_samples\", 8, \"Number of noisy samples per vertex\") flags.DEFINE_float(\"bandwidth\", 1e-2, \"Bandwidth of", "n_points = point.shape[2] preds = [] for start in range(0, n_points, 100000): feed_dict[point_holder]", "mesh_name), \"w\") as fout: pointcloud = data[\"vert\"].reshape([-1, 3]) for v in pointcloud: fout.write(\"v", "= mesh_extractor.query() value_grid = mesh_extractor.to_dense() try: value_grid = np.pad(value_grid, 1, \"constant\", constant_values=-1e6) verts,", "axis=2, batch_dims=2) weights = tf.gather(weights, sample_indices, axis=2, batch_dims=2) batch_holder[\"point\"] = points batch_holder[\"weight\"] =", "unused_var, rec_val, glue_val = sess.run( [loss, train_op, rec_loss, glue_loss], feed_dict) t.set_description(\"Frame_{0} {1:.4f}|{2:.4f}\".format( k,", "axis=0) gt_center = (gt_bbox[0] + gt_bbox[1]) * 0.5 gt_scale = (gt_bbox[1] - gt_bbox[0]).max()", "loss_val, unused_var, rec_val, glue_val = sess.run( [loss, train_op, rec_loss, glue_loss], feed_dict) t.set_description(\"Frame_{0} {1:.4f}|{2:.4f}\".format(", "to prevent OOM due to points overload. 
feed_dict[latent_holder] = latent_val feed_dict[point_holder] = np.expand_dims(points[:,", "-tf.matmul(r, t_inv) transforms = tf.concat( [tf.concat([r, t], axis=-1), inv_transforms[..., -1:, :]], axis=-2) transforms", "= points batch_holder[\"weight\"] = weights dist = tfd.Normal(loc=0., scale=sigma) n_pts = hparams.sample_vert if", "use in theta optimization.\") flags.DEFINE_integer(\"sample_track_vert\", 1024, \"Number of vertex samples for tracking each", "may not use this file except in compliance with the License. # You", "License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "latent_holder, point_holder, latent, occ, batch_val, hparams, idx=i) mesh_name = \"full_pred.obj\" if mesh_model is", "of labed vertices loss.\") flags.DEFINE_float(\"minimal_w\", 0.05, \"Weight of minimal loss.\") flags.DEFINE_bool(\"use_vert\", True, \"Whether", "\"Number of optimization steps.\") flags.DEFINE_integer(\"save_every\", 5000, \"Number of steps to save checkpoint.\") flags.DEFINE_integer(\"summary_every\",", "import trange import trimesh tf.disable_eager_execution() tfd = tfp.distributions def define_flags(): \"\"\"Define command line", "allowed translations.\") def gen_mesh(sess, feed_dict, latent_holder, point_holder, latent, occ, batch_val, hparams, idx=0): \"\"\"Generating", "flags.DEFINE_integer(\"motion\", 0, \"Index of the motion for evaluation.\") flags.DEFINE_integer(\"subject\", 0, \"Index of the", "np.array([ value_grid.shape[0] - 3, value_grid.shape[1] - 3, value_grid.shape[2] - 3 ], dtype=np.float32) verts", "import tensorflow_probability as tfp from tqdm import trange import trimesh tf.disable_eager_execution() tfd =", "of the dataset.\") flags.DEFINE_string(\"data_dir\", None, \"Directory to load data from.\") flags.mark_flag_as_required(\"data_dir\") flags.DEFINE_integer(\"sample_bbox\", 1024,", "gt_scale + gt_center faces = np.stack([faces[..., 1], faces[..., 0], faces[..., 2]], axis=-1) mesh", "governing permissions and # limitations under the License. 
\"\"\"General helper functions.\"\"\" from os", "None, \"gen_mesh\") return latent_holder, latent, occ_eval, tf.reduce_mean( tf.square(occ - hparams.level_set)) def reparam_theta_gradient(model_fn, batch_holder,", "0 else hparams.n_vert noises = dist.sample((1, hparams.n_parts, n_pts, n_samples, hparams.n_dims)) unused_var0, unused_var1, occ", "end_pts_homo) end_pts_transformed = tf.squeeze(end_pts_transformed, axis=-1)[..., :n_dims] # Compute vectors in current configuration pred_links", "flags.DEFINE_bool(\"shared_decoder\", False, \"Whether to use shared decoder.\") flags.DEFINE_float(\"soft_blend\", 5., \"The constant to blend", "True, \"Whether to use joint-based transformation.\") flags.DEFINE_integer(\"sample_vert\", 2048, \"Number of vertex samples.\") #", "occ = tf.reshape(occ, [1, hparams.n_parts + 1, -1, n_samples, 1]) occ = tf.reduce_mean(occ[:,", "unused_i in t: loss_val, unused_var, rec_val, glue_val = sess.run( [loss, train_op, rec_loss, glue_loss],", "flags.DEFINE_integer(\"summary_every\", 500, \"Number of steps to save checkpoint.\") flags.DEFINE_float(\"label_w\", 0.5, \"Weight of labed", "12, \"Batch size.\") flags.DEFINE_integer(\"motion\", 0, \"Index of the motion for evaluation.\") flags.DEFINE_integer(\"subject\", 0,", "theta, using the reparam trick.\"\"\" sigma = hparams.bandwidth n_samples = hparams.n_noisy_samples latent_holder, latent,", "make_summary_feed_dict( iou_hook, iou, best_hook, best_iou, ): feed_dict = {} feed_dict[iou_hook] = iou feed_dict[best_hook]", ":]], axis=-2) transforms = tf.matmul(transforms, inv_first_frame_trans) # Compute transformations of father joints and", "samples.\") # Evalulation Parameters flags.DEFINE_bool(\"gen_mesh_only\", False, \"Whether to generate meshes only.\") # Tracking", "def vanilla_theta_gradient(model_fn, batch_holder, hparams): \"\"\"A vanilla gradient estimator for the pose, theta.\"\"\" latent_holder,", "in theta optimization.\") flags.DEFINE_integer(\"sample_track_vert\", 1024, \"Number of vertex samples for tracking each frame.\")", "1024, \"Number of vertex samples for tracking each frame.\") flags.DEFINE_integer(\"n_noisy_samples\", 8, \"Number of", "+ gt_bbox[1]) * 0.5 gt_scale = (gt_bbox[1] - gt_bbox[0]).max() while points.shape[0] != 0:", "label = (label[:, :1] >= 0.5).astype(np.float32).squeeze(axis=1) iou += np.sum(pred * label) / np.maximum(np.sum(np.maximum(pred,", "faces[..., 2]], axis=-1) mesh = trimesh.Trimesh(vertices=verts, faces=faces) return mesh except: # pylint: disable=bare-except", "flags.DEFINE_float(\"bandwidth\", 1e-2, \"Bandwidth of the gaussian noises.\") flags.DEFINE_bool( \"left_trans\", False, \"Whether to use", "a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "Dataset Parameters flags.DEFINE_enum(\"dataset\", \"amass\", list(k for k in datasets.dataset_dict.keys()), \"Name of the dataset.\")", "of surface samples.\") flags.DEFINE_integer(\"batch_size\", 12, \"Batch size.\") flags.DEFINE_integer(\"motion\", 0, \"Index of the motion", "per vertex\") flags.DEFINE_float(\"bandwidth\", 1e-2, \"Bandwidth of the gaussian noises.\") flags.DEFINE_bool( \"left_trans\", False, \"Whether", "dist = tfd.Normal(loc=0., scale=sigma) n_pts = hparams.sample_vert if hparams.sample_vert > 0 else hparams.n_vert", "1024, \"Number of bbox samples.\") flags.DEFINE_integer(\"sample_surf\", 1024, \"Number of surface samples.\") flags.DEFINE_integer(\"batch_size\", 12,", "use shared decoder.\") flags.DEFINE_float(\"soft_blend\", 5., \"The constant to blend 
parts.\") flags.DEFINE_bool(\"projection\", True, \"Whether", "\"Number of noisy samples per vertex\") flags.DEFINE_float(\"bandwidth\", 1e-2, \"Bandwidth of the gaussian noises.\")", "values) points = mesh_extractor.query() value_grid = mesh_extractor.to_dense() try: value_grid = np.pad(value_grid, 1, \"constant\",", "apply it to vectors from frame0 father_transforms = tf.reduce_sum( tf.expand_dims(transforms, axis=1) * connect.reshape([hparams.n_parts,", "vectors return tf.reduce_sum(tf.square(pred_links - end_pts_transformed)) def vanilla_theta_gradient(model_fn, batch_holder, hparams): \"\"\"A vanilla gradient estimator", "gt_bbox[1]) * 0.5 gt_scale = (gt_bbox[1] - gt_bbox[0]).max() while points.shape[0] != 0: orig_points", "1, hparams.sample_vert], minval=0, maxval=n_vert, dtype=tf.int32) points = tf.gather(points, sample_indices, axis=2, batch_dims=2) weights =", "= dist.sample((1, hparams.n_parts, n_pts, n_samples, hparams.n_dims)) unused_var0, unused_var1, occ = model_fn(batch_holder, noises, None,", "flags.DEFINE_enum(\"gradient_type\", \"reparam\", [\"vanilla\", \"reparam\"], \"Type of gradient to use in theta optimization.\") flags.DEFINE_integer(\"sample_track_vert\",", "[] for start in range(0, n_points, 100000): feed_dict[point_holder] = point[:, :, start:start +", "fout.write(\"v {0} {1} {2}\\n\".format(*v.tolist())) def amass_name_helper(name): name, frame = name.split(\"-\") subject = name[:5]", "flags.DEFINE_integer(\"sample_surf\", 1024, \"Number of surface samples.\") flags.DEFINE_integer(\"batch_size\", 12, \"Batch size.\") flags.DEFINE_integer(\"motion\", 0, \"Index", "for the pose, theta, using the reparam trick.\"\"\" sigma = hparams.bandwidth n_samples =", "= hparams.bandwidth n_samples = hparams.n_noisy_samples latent_holder, latent, occ_eval = model_fn(batch_holder, None, None, \"gen_mesh\")", "shared decoder.\") flags.DEFINE_float(\"soft_blend\", 5., \"The constant to blend parts.\") flags.DEFINE_bool(\"projection\", True, \"Whether to", "v in pointcloud: fout.write(\"v {0} {1} {2}\\n\".format(*v.tolist())) def amass_name_helper(name): name, frame = name.split(\"-\")", "define_flags(): \"\"\"Define command line flags.\"\"\" flags = tf.app.flags # Dataset Parameters flags.DEFINE_enum(\"dataset\", \"amass\",", ">= 0.5).astype(np.float32).squeeze(axis=1) iou += np.sum(pred * label) / np.maximum(np.sum(np.maximum(pred, label)), eps) return iou", "hparams.sample_vert], minval=0, maxval=n_vert, dtype=tf.int32) points = tf.gather(points, sample_indices, axis=2, batch_dims=2) weights = tf.gather(weights,", "IoU.\"\"\" iou = 0. 
eps = 1e-9 latent_val = sess.run(latent, feed_dict) n_points =", "\"Weight of labed vertices loss.\") flags.DEFINE_float(\"minimal_w\", 0.05, \"Weight of minimal loss.\") flags.DEFINE_bool(\"use_vert\", True,", "loss.\") flags.DEFINE_float(\"minimal_w\", 0.05, \"Weight of minimal loss.\") flags.DEFINE_bool(\"use_vert\", True, \"Whether to use vertices", "right side (False).\") flags.DEFINE_string(\"joint_data\", None, \"Path to load joint data.\") flags.DEFINE_float(\"glue_w\", 20., \"Weight", "if not tf.io.gfile.isdir(pth): tf.io.gfile.makedirs(pth) mesh_name = \"pointcloud.obj\" with tf.io.gfile.GFile(path.join(pth, mesh_name), \"w\") as fout:", "\"Number of vertex samples for tracking each frame.\") flags.DEFINE_integer(\"n_noisy_samples\", 8, \"Number of noisy", "\"Index of the motion for evaluation.\") flags.DEFINE_integer(\"subject\", 0, \"Index of the subject for", "100000], axis=1) value = sess.run(occ[:, idx], feed_dict) values.append(value) values = np.concatenate(values, axis=1) values", "0.05, \"Weight of minimal loss.\") flags.DEFINE_bool(\"use_vert\", True, \"Whether to use vertices on the", "label)), eps) return iou def compute_glue_loss(connect, end_pts, inv_transforms, inv_first_frame_trans, joints, hparams): \"\"\"Compute the", "mesh_extractor.update(orig_points, values) points = mesh_extractor.query() value_grid = mesh_extractor.to_dense() try: value_grid = np.pad(value_grid, 1,", "points = mesh_extractor.query() value_grid = mesh_extractor.to_dense() try: value_grid = np.pad(value_grid, 1, \"constant\", constant_values=-1e6)", "name[:5] motion = name[6:] return subject, motion, frame def make_summary_feed_dict( iou_hook, iou, best_hook,", "= path.join(hparams.train_dir, pth, frame) if not tf.io.gfile.isdir(pth): tf.io.gfile.makedirs(pth) start = hparams.n_parts for i", "feed_dict[iou_hook] = iou feed_dict[best_hook] = best_iou return feed_dict def parse_global_step(ckpt): basename = path.basename(ckpt)", "range of allowed translations.\") def gen_mesh(sess, feed_dict, latent_holder, point_holder, latent, occ, batch_val, hparams,", "of father joints and apply it to vectors from frame0 father_transforms = tf.reduce_sum(", "mesh_name), \"w\") as fout: mesh_model.export(fout, file_type=\"obj\") return subject, motion, frame, mesh_model def save_pointcloud(data,", "tfp.distributions def define_flags(): \"\"\"Define command line flags.\"\"\" flags = tf.app.flags # Dataset Parameters", "None, None, \"gen_mesh\") return latent_holder, latent, occ_eval, tf.reduce_mean( tf.square(occ - hparams.level_set)) def reparam_theta_gradient(model_fn,", "\"The value of the level_set.\") flags.DEFINE_integer(\"n_dims\", 3, \"The dimension of the query points.\")", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", "np.stack([gt_verts.min(axis=0), gt_verts.max(axis=0)], axis=0) gt_center = (gt_bbox[0] + gt_bbox[1]) * 0.5 gt_scale = (gt_bbox[1]", "= \"full_pred.obj\" if mesh_model is not None: with tf.io.gfile.GFile(path.join(pth, mesh_name), \"w\") as fout:", "as fout: pointcloud = data[\"vert\"].reshape([-1, 3]) for v in pointcloud: fout.write(\"v {0} {1}", "feed_dict) n_points = point.shape[2] preds = [] for start in range(0, n_points, 100000):", "flags.DEFINE_float(\"minimal_w\", 0.05, \"Weight of minimal loss.\") flags.DEFINE_bool(\"use_vert\", True, \"Whether to use vertices on", "= np.concatenate(values, axis=1) values = values[0, :, 0].astype(np.float64) mesh_extractor.update(orig_points, values) points = mesh_extractor.query()", "hparams.n_vert noises = 
dist.sample((1, hparams.n_parts, n_pts, n_samples, hparams.n_dims)) unused_var0, unused_var1, occ = model_fn(batch_holder,", "\"Whether to use projected shape features.\") flags.DEFINE_float(\"level_set\", 0.5, \"The value of the level_set.\")", "= np.pad(value_grid, 1, \"constant\", constant_values=-1e6) verts, faces, normals, unused_var = measure.marching_cubes_lewiner( value_grid, min(level_set,", "latent, occ, batch_val, hparams, idx=i) mesh_name = \"full_pred.obj\" if mesh_model is not None:", "pth = path.join(hparams.train_dir, pth, frame) if not tf.io.gfile.isdir(pth): tf.io.gfile.makedirs(pth) start = hparams.n_parts for", "rate\") flags.DEFINE_string(\"train_dir\", None, \"Training directory.\") flags.mark_flag_as_required(\"train_dir\") flags.DEFINE_integer(\"max_steps\", 200000, \"Number of optimization steps.\") flags.DEFINE_integer(\"save_every\",", "tf.io.gfile.makedirs(pth) mesh_name = \"pointcloud.obj\" with tf.io.gfile.GFile(path.join(pth, mesh_name), \"w\") as fout: pointcloud = data[\"vert\"].reshape([-1,", "not tf.io.gfile.isdir(pth): tf.io.gfile.makedirs(pth) mesh_name = \"pointcloud.obj\" with tf.io.gfile.GFile(path.join(pth, mesh_name), \"w\") as fout: pointcloud", "in range(0, n_points, 100000): feed_dict[point_holder] = point[:, :, start:start + 100000] feed_dict[latent_holder] =", "= batch_holder[\"point\"] weights = batch_holder[\"weight\"] n_vert = tf.shape(points)[2] sample_indices = tf.random.uniform([1, 1, hparams.sample_vert],", "of the model.\") flags.DEFINE_integer(\"n_parts\", 24, \"Number of parts.\") flags.DEFINE_integer(\"total_dim\", 960, \"Dimension of the", "flags.DEFINE_string(\"joint_data\", None, \"Path to load joint data.\") flags.DEFINE_float(\"glue_w\", 20., \"Weight of length constraint", "only.\") # Tracking Parameters flags.DEFINE_float(\"theta_lr\", 5e-4, \"Learning rate\") flags.DEFINE_integer(\"max_steps_per_frame\", 1792, \"Number of optimization", "points.astype(np.float32) points = (np.expand_dims(points, axis=0) / mesh_extractor.resolution - 0.5) * scale points =", "optimization steps.\") flags.DEFINE_integer(\"save_every\", 5000, \"Number of steps to save checkpoint.\") flags.DEFINE_integer(\"summary_every\", 500, \"Number", "transforms = tf.matmul(transforms, inv_first_frame_trans) # Compute transformations of father joints and apply it" ]
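To make the metric above easier to check in isolation, here is a minimal NumPy sketch of the same thresholding and intersection-over-union arithmetic that compute_iou applies to its stacked predictions. The pred_logits and gt_labels arrays and the 0.5 level set are illustrative values chosen for this sketch, not data from the module.

# Toy illustration of the IoU arithmetic used in compute_iou.
import numpy as np

pred_logits = np.array([0.9, 0.2, 0.7, 0.4])  # hypothetical occupancy values
gt_labels = np.array([1.0, 0.0, 1.0, 1.0])    # hypothetical ground truth

eps = 1e-9
pred = (pred_logits >= 0.5).astype(np.float32)
label = (gt_labels >= 0.5).astype(np.float32)
iou = np.sum(pred * label) / np.maximum(np.sum(np.maximum(pred, label)), eps)
print(iou)  # 2 points in the intersection / 3 in the union -> ~0.667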
[ "with open(\"README.md\", 'r') as f: long_description = f.read() # This call to setup()", ":: 3.5\", \"Programming Language :: Python :: 3.6\", \"Programming Language :: Python ::", "README file with open(\"README.md\", 'r') as f: long_description = f.read() # This call", "f.read() # This call to setup() does all the work setup(name=\"sedpy\", version=\"1.0.0\", description=\"Cross-platform", "Language :: Python :: 3.7\", \"Programming Language :: Python :: 3.8\" ], packages=[\"sedpy\"],", ":: MIT License\", \"Programming Language :: Python :: 2.7\", \"Programming Language :: Python", "license=\"MIT\", classifiers=[ \"License :: OSI Approved :: MIT License\", \"Programming Language :: Python", "long_description = f.read() # This call to setup() does all the work setup(name=\"sedpy\",", "Language :: Python :: 2.7\", \"Programming Language :: Python :: 3\", \"Programming Language", "f: long_description = f.read() # This call to setup() does all the work", "OSI Approved :: MIT License\", \"Programming Language :: Python :: 2.7\", \"Programming Language", "\"Programming Language :: Python :: 3.6\", \"Programming Language :: Python :: 3.7\", \"Programming", "setup(name=\"sedpy\", version=\"1.0.0\", description=\"Cross-platform stream-line editing tool\", long_description=long_description, long_description_content_type=\"text/markdown\", url=\"https://github.com/mritunjaysharma394/sedpy\", author=\"<NAME>\", author_email=\"<EMAIL>\", license=\"MIT\", classifiers=[", "\"Programming Language :: Python :: 3.5\", \"Programming Language :: Python :: 3.6\", \"Programming", "open(\"README.md\", 'r') as f: long_description = f.read() # This call to setup() does", "\"License :: OSI Approved :: MIT License\", \"Programming Language :: Python :: 2.7\",", "does all the work setup(name=\"sedpy\", version=\"1.0.0\", description=\"Cross-platform stream-line editing tool\", long_description=long_description, long_description_content_type=\"text/markdown\", url=\"https://github.com/mritunjaysharma394/sedpy\",", ":: 3.6\", \"Programming Language :: Python :: 3.7\", \"Programming Language :: Python ::", "stream-line editing tool\", long_description=long_description, long_description_content_type=\"text/markdown\", url=\"https://github.com/mritunjaysharma394/sedpy\", author=\"<NAME>\", author_email=\"<EMAIL>\", license=\"MIT\", classifiers=[ \"License :: OSI", "version=\"1.0.0\", description=\"Cross-platform stream-line editing tool\", long_description=long_description, long_description_content_type=\"text/markdown\", url=\"https://github.com/mritunjaysharma394/sedpy\", author=\"<NAME>\", author_email=\"<EMAIL>\", license=\"MIT\", classifiers=[ \"License", "3.7\", \"Programming Language :: Python :: 3.8\" ], packages=[\"sedpy\"], include_package_data=True, entry_points={'console_scripts': ['sedpy =", "file with open(\"README.md\", 'r') as f: long_description = f.read() # This call to", "long_description=long_description, long_description_content_type=\"text/markdown\", url=\"https://github.com/mritunjaysharma394/sedpy\", author=\"<NAME>\", author_email=\"<EMAIL>\", license=\"MIT\", classifiers=[ \"License :: OSI Approved :: MIT", "classifiers=[ \"License :: OSI Approved :: MIT License\", \"Programming Language :: Python ::", "2.7\", \"Programming Language :: Python :: 3\", \"Programming Language :: Python :: 3.5\",", "Language :: Python :: 3.6\", \"Programming Language :: Python :: 3.7\", \"Programming Language", "'r') as f: long_description = f.read() # This call to setup() does all", "author_email=\"<EMAIL>\", 
license=\"MIT\", classifiers=[ \"License :: OSI Approved :: MIT License\", \"Programming Language ::", ":: Python :: 2.7\", \"Programming Language :: Python :: 3\", \"Programming Language ::", ":: 3.7\", \"Programming Language :: Python :: 3.8\" ], packages=[\"sedpy\"], include_package_data=True, entry_points={'console_scripts': ['sedpy", "Python :: 3.5\", \"Programming Language :: Python :: 3.6\", \"Programming Language :: Python", "call to setup() does all the work setup(name=\"sedpy\", version=\"1.0.0\", description=\"Cross-platform stream-line editing tool\",", "Language :: Python :: 3\", \"Programming Language :: Python :: 3.5\", \"Programming Language", "setup() does all the work setup(name=\"sedpy\", version=\"1.0.0\", description=\"Cross-platform stream-line editing tool\", long_description=long_description, long_description_content_type=\"text/markdown\",", "<filename>setup.py from setuptools import setup # The text of the README file with", "to setup() does all the work setup(name=\"sedpy\", version=\"1.0.0\", description=\"Cross-platform stream-line editing tool\", long_description=long_description,", "Python :: 3\", \"Programming Language :: Python :: 3.5\", \"Programming Language :: Python", "import setup # The text of the README file with open(\"README.md\", 'r') as", "the work setup(name=\"sedpy\", version=\"1.0.0\", description=\"Cross-platform stream-line editing tool\", long_description=long_description, long_description_content_type=\"text/markdown\", url=\"https://github.com/mritunjaysharma394/sedpy\", author=\"<NAME>\", author_email=\"<EMAIL>\",", "Approved :: MIT License\", \"Programming Language :: Python :: 2.7\", \"Programming Language ::", "url=\"https://github.com/mritunjaysharma394/sedpy\", author=\"<NAME>\", author_email=\"<EMAIL>\", license=\"MIT\", classifiers=[ \"License :: OSI Approved :: MIT License\", \"Programming", "all the work setup(name=\"sedpy\", version=\"1.0.0\", description=\"Cross-platform stream-line editing tool\", long_description=long_description, long_description_content_type=\"text/markdown\", url=\"https://github.com/mritunjaysharma394/sedpy\", author=\"<NAME>\",", "from setuptools import setup # The text of the README file with open(\"README.md\",", "of the README file with open(\"README.md\", 'r') as f: long_description = f.read() #", "long_description_content_type=\"text/markdown\", url=\"https://github.com/mritunjaysharma394/sedpy\", author=\"<NAME>\", author_email=\"<EMAIL>\", license=\"MIT\", classifiers=[ \"License :: OSI Approved :: MIT License\",", "# This call to setup() does all the work setup(name=\"sedpy\", version=\"1.0.0\", description=\"Cross-platform stream-line", "setuptools import setup # The text of the README file with open(\"README.md\", 'r')", "work setup(name=\"sedpy\", version=\"1.0.0\", description=\"Cross-platform stream-line editing tool\", long_description=long_description, long_description_content_type=\"text/markdown\", url=\"https://github.com/mritunjaysharma394/sedpy\", author=\"<NAME>\", author_email=\"<EMAIL>\", license=\"MIT\",", "# The text of the README file with open(\"README.md\", 'r') as f: long_description", "description=\"Cross-platform stream-line editing tool\", long_description=long_description, long_description_content_type=\"text/markdown\", url=\"https://github.com/mritunjaysharma394/sedpy\", author=\"<NAME>\", author_email=\"<EMAIL>\", license=\"MIT\", classifiers=[ \"License ::", "tool\", long_description=long_description, long_description_content_type=\"text/markdown\", 
url=\"https://github.com/mritunjaysharma394/sedpy\", author=\"<NAME>\", author_email=\"<EMAIL>\", license=\"MIT\", classifiers=[ \"License :: OSI Approved ::", "This call to setup() does all the work setup(name=\"sedpy\", version=\"1.0.0\", description=\"Cross-platform stream-line editing", ":: Python :: 3.5\", \"Programming Language :: Python :: 3.6\", \"Programming Language ::", ":: Python :: 3.6\", \"Programming Language :: Python :: 3.7\", \"Programming Language ::", ":: Python :: 3.7\", \"Programming Language :: Python :: 3.8\" ], packages=[\"sedpy\"], include_package_data=True,", "\"Programming Language :: Python :: 2.7\", \"Programming Language :: Python :: 3\", \"Programming", ":: Python :: 3\", \"Programming Language :: Python :: 3.5\", \"Programming Language ::", "the README file with open(\"README.md\", 'r') as f: long_description = f.read() # This", ":: 3\", \"Programming Language :: Python :: 3.5\", \"Programming Language :: Python ::", "\"Programming Language :: Python :: 3.8\" ], packages=[\"sedpy\"], include_package_data=True, entry_points={'console_scripts': ['sedpy = sedpy.__main__:main']})", "Python :: 2.7\", \"Programming Language :: Python :: 3\", \"Programming Language :: Python", "Language :: Python :: 3.5\", \"Programming Language :: Python :: 3.6\", \"Programming Language", "Python :: 3.6\", \"Programming Language :: Python :: 3.7\", \"Programming Language :: Python", "as f: long_description = f.read() # This call to setup() does all the", "author=\"<NAME>\", author_email=\"<EMAIL>\", license=\"MIT\", classifiers=[ \"License :: OSI Approved :: MIT License\", \"Programming Language", "3.6\", \"Programming Language :: Python :: 3.7\", \"Programming Language :: Python :: 3.8\"", "editing tool\", long_description=long_description, long_description_content_type=\"text/markdown\", url=\"https://github.com/mritunjaysharma394/sedpy\", author=\"<NAME>\", author_email=\"<EMAIL>\", license=\"MIT\", classifiers=[ \"License :: OSI Approved", "The text of the README file with open(\"README.md\", 'r') as f: long_description =", "MIT License\", \"Programming Language :: Python :: 2.7\", \"Programming Language :: Python ::", "Python :: 3.7\", \"Programming Language :: Python :: 3.8\" ], packages=[\"sedpy\"], include_package_data=True, entry_points={'console_scripts':", "text of the README file with open(\"README.md\", 'r') as f: long_description = f.read()", "\"Programming Language :: Python :: 3.7\", \"Programming Language :: Python :: 3.8\" ],", "\"Programming Language :: Python :: 3\", \"Programming Language :: Python :: 3.5\", \"Programming", "3\", \"Programming Language :: Python :: 3.5\", \"Programming Language :: Python :: 3.6\",", "setup # The text of the README file with open(\"README.md\", 'r') as f:", "= f.read() # This call to setup() does all the work setup(name=\"sedpy\", version=\"1.0.0\",", ":: 2.7\", \"Programming Language :: Python :: 3\", \"Programming Language :: Python ::", "License\", \"Programming Language :: Python :: 2.7\", \"Programming Language :: Python :: 3\",", "3.5\", \"Programming Language :: Python :: 3.6\", \"Programming Language :: Python :: 3.7\",", ":: OSI Approved :: MIT License\", \"Programming Language :: Python :: 2.7\", \"Programming" ]
[ "continue node.protocol.find_node(search_key) except IndexError: for bucket_node in self.bucket_tree.get_leaf_bucket_nodes(include_self=False): bucket_node_range = bucket_node.get_range() key =", "logging import random import settings import string from collections import deque from dht.node", "connect_to_unconnected_nodes(self): while True: await asyncio.sleep(1) for node in self.bucket_tree.get_unconnected_nodes(): logging.debug(node) connect = self.loop.create_connection(", "self.value_store, self.listen_port), node.address, int(node.port) ) _, protocol = await connect node.protocol = protocol", "port to listen on.') parser.add_argument('-v', action='store_true', dest='verbose_info', help='Verbose') parser.add_argument('-vv', action='store_true', dest='verbose_debug', help='More verbose')", "the loop to start everything. \"\"\" try: self.loop.run_forever() except KeyboardInterrupt: pass self.loop.close() if", "string from collections import deque from dht.node import SelfNode from dht.protocol import DHTServerProtocol,", "parser.add_argument('-vv', action='store_true', dest='verbose_debug', help='More verbose') args = parser.parse_args() if args.verbose_debug: logging.basicConfig(level=logging.DEBUG) logging.debug('Setting logging", "hash_string( ''.join([random.choice(string.ascii_letters) for _ in range(160)])) logging.info(\"Our key is {}\".format(key)) return key def", "Connect to the initial node if one is known. \"\"\" logging.info(\"Connecting to initial", "to listen for incoming connections. \"\"\" listen = self.loop.create_server( lambda: DHTServerProtocol( self.self_key, self.bucket_tree,", "node.protocol = protocol protocol.node = node def run(self): \"\"\" Run the loop to", "level to info') if args.initial_node is not None: initial_node = tuple(args.initial_node.split(\":\")) else: initial_node", "known. \"\"\" logging.info(\"Connecting to initial node: {}\".format(self.initial_node)) connect = self.loop.create_connection( lambda: DHTClientProtocol( self.self_key,", "self.value_store = self.create_value_store() self.self_key = self.create_self_key() self.self_node = self.create_self_node() self.bucket_tree = self.create_bucket_tree() self.loop", "connect node.protocol = protocol protocol.node = node def run(self): \"\"\" Run the loop", "import random import settings import string from collections import deque from dht.node import", "is not None: self.connect_to_initial_node() self.loop.create_task(self.refresh_nodes(key=self.self_key)) self.loop.create_task(self.connect_to_unconnected_nodes()) def create_value_store(self): \"\"\" Create a Store to", "self.listen_port), self.initial_node[0], int(self.initial_node[1]) ) self.loop.run_until_complete(connect) async def refresh_nodes(self, key=None, wait=None): to_check = deque([key])", "parser = argparse.ArgumentParser(description='A python DHT') parser.add_argument( '--initial-node', '-n', help='The initial node to connect", "level to debug') if args.verbose_info: logging.basicConfig(level=logging.INFO) logging.info('Setting logging level to info') if args.initial_node", "= self.create_self_node() self.bucket_tree = self.create_bucket_tree() self.loop = asyncio.get_event_loop() self.create_server() if self.initial_node is not", "self_node def create_bucket_tree(self): \"\"\" Create the BucketTree to store Nodes. 
\"\"\" tree =", "deque([key]) while True: if wait is None: wait = 3 # TODO maximum", "BucketTree(self.self_node) return tree def create_server(self): \"\"\" Create the server to listen for incoming", "Create the server to listen for incoming connections. \"\"\" listen = self.loop.create_server( lambda:", "* 2, 30) logging.debug(\"refresh_node sleeping {:d} seconds\".format(wait)) await asyncio.sleep(wait) try: while True: search_key", "module = importlib.import_module('value_stores.' + settings.VALUE_STORE) value_store_class = getattr(module, 'MemoryStore') return value_store_class() def create_self_key(self):", "return self_node def create_bucket_tree(self): \"\"\" Create the BucketTree to store Nodes. \"\"\" tree", "True: search_key = to_check.pop() nodes = self.bucket_tree.find_nodes(search_key) for node in nodes: if node", "to store values in. \"\"\" module = importlib.import_module('value_stores.' + settings.VALUE_STORE) value_store_class = getattr(module,", "__init__(self, listen_port, initial_node=None): self.initial_node = initial_node self.listen_port = listen_port logging.info(\"Listening on {}\".format(self.listen_port)) self.value_store", "= parser.parse_args() if args.verbose_debug: logging.basicConfig(level=logging.DEBUG) logging.debug('Setting logging level to debug') if args.verbose_info: logging.basicConfig(level=logging.INFO)", "create_bucket_tree(self): \"\"\" Create the BucketTree to store Nodes. \"\"\" tree = BucketTree(self.self_node) return", "None: wait = 3 # TODO maximum wait in the settings wait =", "{}\".format(key)) return key def create_self_node(self): \"\"\" Create a Node to represent ourselves. \"\"\"", "'--initial-node', '-n', help='The initial node to connect to (1.2.3.4:5678).') parser.add_argument( '--listen-port', '-p', default=9999,", "to store Nodes. \"\"\" tree = BucketTree(self.self_node) return tree def create_server(self): \"\"\" Create", "\"\"\" logging.info(\"Connecting to initial node: {}\".format(self.initial_node)) connect = self.loop.create_connection( lambda: DHTClientProtocol( self.self_key, self.bucket_tree,", "'0.0.0.0', self.listen_port ) self.loop.run_until_complete(listen) def connect_to_initial_node(self): \"\"\" Connect to the initial node if", "3 # TODO maximum wait in the settings wait = min(wait * 2,", "logging.info(\"Listening on {}\".format(self.listen_port)) self.value_store = self.create_value_store() self.self_key = self.create_self_key() self.self_node = self.create_self_node() self.bucket_tree", "wait = min(wait * 2, 30) logging.debug(\"refresh_node sleeping {:d} seconds\".format(wait)) await asyncio.sleep(wait) try:", "for node in nodes: if node == self.self_node or node.protocol is None: continue", "__name__ == \"__main__\": parser = argparse.ArgumentParser(description='A python DHT') parser.add_argument( '--initial-node', '-n', help='The initial", "run(self): \"\"\" Run the loop to start everything. 
\"\"\" try: self.loop.run_forever() except KeyboardInterrupt:", "asyncio.get_event_loop() self.create_server() if self.initial_node is not None: self.connect_to_initial_node() self.loop.create_task(self.refresh_nodes(key=self.self_key)) self.loop.create_task(self.connect_to_unconnected_nodes()) def create_value_store(self): \"\"\"", "DHTServerProtocol( self.self_key, self.bucket_tree, self.value_store, self.listen_port), '0.0.0.0', self.listen_port ) self.loop.run_until_complete(listen) def connect_to_initial_node(self): \"\"\" Connect", "import SelfNode from dht.protocol import DHTServerProtocol, DHTClientProtocol from dht.routing import BucketTree from dht.utils", "self.value_store, self.listen_port), '0.0.0.0', self.listen_port ) self.loop.run_until_complete(listen) def connect_to_initial_node(self): \"\"\" Connect to the initial", "def run(self): \"\"\" Run the loop to start everything. \"\"\" try: self.loop.run_forever() except", "seconds\".format(wait)) await asyncio.sleep(wait) try: while True: search_key = to_check.pop() nodes = self.bucket_tree.find_nodes(search_key) for", "True: if wait is None: wait = 3 # TODO maximum wait in", "self.bucket_tree.get_leaf_bucket_nodes(include_self=False): bucket_node_range = bucket_node.get_range() key = hex(random.randrange(bucket_node_range[0], bucket_node_range[1]))[2:] to_check.appendleft(key) async def connect_to_unconnected_nodes(self): while", "in range(160)])) logging.info(\"Our key is {}\".format(key)) return key def create_self_node(self): \"\"\" Create a", "async def connect_to_unconnected_nodes(self): while True: await asyncio.sleep(1) for node in self.bucket_tree.get_unconnected_nodes(): logging.debug(node) connect", "import DHTServerProtocol, DHTClientProtocol from dht.routing import BucketTree from dht.utils import hash_string class DHT:", "'-p', default=9999, help='The port to listen on.') parser.add_argument('-v', action='store_true', dest='verbose_info', help='Verbose') parser.add_argument('-vv', action='store_true',", "import logging import random import settings import string from collections import deque from", "node == self.self_node or node.protocol is None: continue node.protocol.find_node(search_key) except IndexError: for bucket_node", ") _, protocol = await connect node.protocol = protocol protocol.node = node def", "to debug') if args.verbose_info: logging.basicConfig(level=logging.INFO) logging.info('Setting logging level to info') if args.initial_node is", "if args.verbose_info: logging.basicConfig(level=logging.INFO) logging.info('Setting logging level to info') if args.initial_node is not None:", "Nodes. \"\"\" tree = BucketTree(self.self_node) return tree def create_server(self): \"\"\" Create the server", "is known. 
\"\"\" logging.info(\"Connecting to initial node: {}\".format(self.initial_node)) connect = self.loop.create_connection( lambda: DHTClientProtocol(", "to listen on.') parser.add_argument('-v', action='store_true', dest='verbose_info', help='Verbose') parser.add_argument('-vv', action='store_true', dest='verbose_debug', help='More verbose') args", "is not None: initial_node = tuple(args.initial_node.split(\":\")) else: initial_node = None dht = DHT(args.listen_port,", "settings wait = min(wait * 2, 30) logging.debug(\"refresh_node sleeping {:d} seconds\".format(wait)) await asyncio.sleep(wait)", "self.bucket_tree, self.value_store, self.listen_port), self.initial_node[0], int(self.initial_node[1]) ) self.loop.run_until_complete(connect) async def refresh_nodes(self, key=None, wait=None): to_check", "which we will identify ourselves. \"\"\" key = hash_string( ''.join([random.choice(string.ascii_letters) for _ in", "def refresh_nodes(self, key=None, wait=None): to_check = deque([key]) while True: if wait is None:", "Store to store values in. \"\"\" module = importlib.import_module('value_stores.' + settings.VALUE_STORE) value_store_class =", "key def create_self_node(self): \"\"\" Create a Node to represent ourselves. \"\"\" self_node =", "self.listen_port = listen_port logging.info(\"Listening on {}\".format(self.listen_port)) self.value_store = self.create_value_store() self.self_key = self.create_self_key() self.self_node", "args.verbose_debug: logging.basicConfig(level=logging.DEBUG) logging.debug('Setting logging level to debug') if args.verbose_info: logging.basicConfig(level=logging.INFO) logging.info('Setting logging level", "connect = self.loop.create_connection( lambda: DHTClientProtocol( self.self_key, self.bucket_tree, self.value_store, self.listen_port), self.initial_node[0], int(self.initial_node[1]) ) self.loop.run_until_complete(connect)", "help='Verbose') parser.add_argument('-vv', action='store_true', dest='verbose_debug', help='More verbose') args = parser.parse_args() if args.verbose_debug: logging.basicConfig(level=logging.DEBUG) logging.debug('Setting", "start everything. \"\"\" try: self.loop.run_forever() except KeyboardInterrupt: pass self.loop.close() if __name__ == \"__main__\":", "return tree def create_server(self): \"\"\" Create the server to listen for incoming connections.", "to info') if args.initial_node is not None: initial_node = tuple(args.initial_node.split(\":\")) else: initial_node =", "dht.protocol import DHTServerProtocol, DHTClientProtocol from dht.routing import BucketTree from dht.utils import hash_string class", "self.loop.create_task(self.refresh_nodes(key=self.self_key)) self.loop.create_task(self.connect_to_unconnected_nodes()) def create_value_store(self): \"\"\" Create a Store to store values in. \"\"\"", "# TODO maximum wait in the settings wait = min(wait * 2, 30)", "a key with which we will identify ourselves. \"\"\" key = hash_string( ''.join([random.choice(string.ascii_letters)", "dht.routing import BucketTree from dht.utils import hash_string class DHT: def __init__(self, listen_port, initial_node=None):", "represent ourselves. \"\"\" self_node = SelfNode(key=self.self_key) return self_node def create_bucket_tree(self): \"\"\" Create the", "def create_value_store(self): \"\"\" Create a Store to store values in. 
\"\"\" module =", "tree = BucketTree(self.self_node) return tree def create_server(self): \"\"\" Create the server to listen", "= self.loop.create_connection( lambda: DHTClientProtocol( self.self_key, self.bucket_tree, self.value_store, self.listen_port), self.initial_node[0], int(self.initial_node[1]) ) self.loop.run_until_complete(connect) async", "logging.info(\"Our key is {}\".format(key)) return key def create_self_node(self): \"\"\" Create a Node to", "settings.VALUE_STORE) value_store_class = getattr(module, 'MemoryStore') return value_store_class() def create_self_key(self): \"\"\" Create a key", "create_server(self): \"\"\" Create the server to listen for incoming connections. \"\"\" listen =", "not None: initial_node = tuple(args.initial_node.split(\":\")) else: initial_node = None dht = DHT(args.listen_port, initial_node)", "initial_node self.listen_port = listen_port logging.info(\"Listening on {}\".format(self.listen_port)) self.value_store = self.create_value_store() self.self_key = self.create_self_key()", "lambda: DHTClientProtocol( self.self_key, self.bucket_tree, self.value_store, self.listen_port), self.initial_node[0], int(self.initial_node[1]) ) self.loop.run_until_complete(connect) async def refresh_nodes(self,", "import importlib import logging import random import settings import string from collections import", "self_node = SelfNode(key=self.self_key) return self_node def create_bucket_tree(self): \"\"\" Create the BucketTree to store", "node def run(self): \"\"\" Run the loop to start everything. \"\"\" try: self.loop.run_forever()", "ourselves. \"\"\" self_node = SelfNode(key=self.self_key) return self_node def create_bucket_tree(self): \"\"\" Create the BucketTree", "the settings wait = min(wait * 2, 30) logging.debug(\"refresh_node sleeping {:d} seconds\".format(wait)) await", "= initial_node self.listen_port = listen_port logging.info(\"Listening on {}\".format(self.listen_port)) self.value_store = self.create_value_store() self.self_key =", "for node in self.bucket_tree.get_unconnected_nodes(): logging.debug(node) connect = self.loop.create_connection( lambda: DHTClientProtocol( self.self_key, self.bucket_tree, self.value_store,", "None: self.connect_to_initial_node() self.loop.create_task(self.refresh_nodes(key=self.self_key)) self.loop.create_task(self.connect_to_unconnected_nodes()) def create_value_store(self): \"\"\" Create a Store to store values", "values in. \"\"\" module = importlib.import_module('value_stores.' + settings.VALUE_STORE) value_store_class = getattr(module, 'MemoryStore') return", "= self.bucket_tree.find_nodes(search_key) for node in nodes: if node == self.self_node or node.protocol is", "int(node.port) ) _, protocol = await connect node.protocol = protocol protocol.node = node", "_, protocol = await connect node.protocol = protocol protocol.node = node def run(self):", "Create the BucketTree to store Nodes. \"\"\" tree = BucketTree(self.self_node) return tree def", "import asyncio import importlib import logging import random import settings import string from", "key with which we will identify ourselves. 
\"\"\" key = hash_string( ''.join([random.choice(string.ascii_letters) for", "parser.parse_args() if args.verbose_debug: logging.basicConfig(level=logging.DEBUG) logging.debug('Setting logging level to debug') if args.verbose_info: logging.basicConfig(level=logging.INFO) logging.info('Setting", "argparse.ArgumentParser(description='A python DHT') parser.add_argument( '--initial-node', '-n', help='The initial node to connect to (1.2.3.4:5678).')", "SelfNode(key=self.self_key) return self_node def create_bucket_tree(self): \"\"\" Create the BucketTree to store Nodes. \"\"\"", "in self.bucket_tree.get_leaf_bucket_nodes(include_self=False): bucket_node_range = bucket_node.get_range() key = hex(random.randrange(bucket_node_range[0], bucket_node_range[1]))[2:] to_check.appendleft(key) async def connect_to_unconnected_nodes(self):", "import argparse import asyncio import importlib import logging import random import settings import", "Create a Store to store values in. \"\"\" module = importlib.import_module('value_stores.' + settings.VALUE_STORE)", "= self.loop.create_connection( lambda: DHTClientProtocol( self.self_key, self.bucket_tree, self.value_store, self.listen_port), node.address, int(node.port) ) _, protocol", "return key def create_self_node(self): \"\"\" Create a Node to represent ourselves. \"\"\" self_node", "if self.initial_node is not None: self.connect_to_initial_node() self.loop.create_task(self.refresh_nodes(key=self.self_key)) self.loop.create_task(self.connect_to_unconnected_nodes()) def create_value_store(self): \"\"\" Create a", "self.loop.run_forever() except KeyboardInterrupt: pass self.loop.close() if __name__ == \"__main__\": parser = argparse.ArgumentParser(description='A python", "bucket_node in self.bucket_tree.get_leaf_bucket_nodes(include_self=False): bucket_node_range = bucket_node.get_range() key = hex(random.randrange(bucket_node_range[0], bucket_node_range[1]))[2:] to_check.appendleft(key) async def", "getattr(module, 'MemoryStore') return value_store_class() def create_self_key(self): \"\"\" Create a key with which we", "or node.protocol is None: continue node.protocol.find_node(search_key) except IndexError: for bucket_node in self.bucket_tree.get_leaf_bucket_nodes(include_self=False): bucket_node_range", "def create_self_key(self): \"\"\" Create a key with which we will identify ourselves. \"\"\"", "Create a key with which we will identify ourselves. 
\"\"\" key = hash_string(", "pass self.loop.close() if __name__ == \"__main__\": parser = argparse.ArgumentParser(description='A python DHT') parser.add_argument( '--initial-node',", "self.loop.create_server( lambda: DHTServerProtocol( self.self_key, self.bucket_tree, self.value_store, self.listen_port), '0.0.0.0', self.listen_port ) self.loop.run_until_complete(listen) def connect_to_initial_node(self):", "= await connect node.protocol = protocol protocol.node = node def run(self): \"\"\" Run", "30) logging.debug(\"refresh_node sleeping {:d} seconds\".format(wait)) await asyncio.sleep(wait) try: while True: search_key = to_check.pop()", "self.bucket_tree = self.create_bucket_tree() self.loop = asyncio.get_event_loop() self.create_server() if self.initial_node is not None: self.connect_to_initial_node()", "= SelfNode(key=self.self_key) return self_node def create_bucket_tree(self): \"\"\" Create the BucketTree to store Nodes.", "self.create_value_store() self.self_key = self.create_self_key() self.self_node = self.create_self_node() self.bucket_tree = self.create_bucket_tree() self.loop = asyncio.get_event_loop()", "= min(wait * 2, 30) logging.debug(\"refresh_node sleeping {:d} seconds\".format(wait)) await asyncio.sleep(wait) try: while", "if node == self.self_node or node.protocol is None: continue node.protocol.find_node(search_key) except IndexError: for", "BucketTree to store Nodes. \"\"\" tree = BucketTree(self.self_node) return tree def create_server(self): \"\"\"", "node.protocol is None: continue node.protocol.find_node(search_key) except IndexError: for bucket_node in self.bucket_tree.get_leaf_bucket_nodes(include_self=False): bucket_node_range =", "def create_self_node(self): \"\"\" Create a Node to represent ourselves. \"\"\" self_node = SelfNode(key=self.self_key)", "BucketTree from dht.utils import hash_string class DHT: def __init__(self, listen_port, initial_node=None): self.initial_node =", "\"\"\" Create a Store to store values in. \"\"\" module = importlib.import_module('value_stores.' +", "\"__main__\": parser = argparse.ArgumentParser(description='A python DHT') parser.add_argument( '--initial-node', '-n', help='The initial node to", "the BucketTree to store Nodes. \"\"\" tree = BucketTree(self.self_node) return tree def create_server(self):", "the server to listen for incoming connections. \"\"\" listen = self.loop.create_server( lambda: DHTServerProtocol(", "self.create_bucket_tree() self.loop = asyncio.get_event_loop() self.create_server() if self.initial_node is not None: self.connect_to_initial_node() self.loop.create_task(self.refresh_nodes(key=self.self_key)) self.loop.create_task(self.connect_to_unconnected_nodes())", "settings import string from collections import deque from dht.node import SelfNode from dht.protocol", "from dht.utils import hash_string class DHT: def __init__(self, listen_port, initial_node=None): self.initial_node = initial_node", "IndexError: for bucket_node in self.bucket_tree.get_leaf_bucket_nodes(include_self=False): bucket_node_range = bucket_node.get_range() key = hex(random.randrange(bucket_node_range[0], bucket_node_range[1]))[2:] to_check.appendleft(key)", "verbose') args = parser.parse_args() if args.verbose_debug: logging.basicConfig(level=logging.DEBUG) logging.debug('Setting logging level to debug') if", "a Store to store values in. \"\"\" module = importlib.import_module('value_stores.' 
+ settings.VALUE_STORE) value_store_class", "is None: wait = 3 # TODO maximum wait in the settings wait", "= node def run(self): \"\"\" Run the loop to start everything. \"\"\" try:", "protocol.node = node def run(self): \"\"\" Run the loop to start everything. \"\"\"", "import BucketTree from dht.utils import hash_string class DHT: def __init__(self, listen_port, initial_node=None): self.initial_node", "default=9999, help='The port to listen on.') parser.add_argument('-v', action='store_true', dest='verbose_info', help='Verbose') parser.add_argument('-vv', action='store_true', dest='verbose_debug',", "if args.initial_node is not None: initial_node = tuple(args.initial_node.split(\":\")) else: initial_node = None dht", "self.listen_port), node.address, int(node.port) ) _, protocol = await connect node.protocol = protocol protocol.node", "self.create_self_node() self.bucket_tree = self.create_bucket_tree() self.loop = asyncio.get_event_loop() self.create_server() if self.initial_node is not None:", "= bucket_node.get_range() key = hex(random.randrange(bucket_node_range[0], bucket_node_range[1]))[2:] to_check.appendleft(key) async def connect_to_unconnected_nodes(self): while True: await", "self.bucket_tree.find_nodes(search_key) for node in nodes: if node == self.self_node or node.protocol is None:", ") self.loop.run_until_complete(listen) def connect_to_initial_node(self): \"\"\" Connect to the initial node if one is", "parser.add_argument( '--initial-node', '-n', help='The initial node to connect to (1.2.3.4:5678).') parser.add_argument( '--listen-port', '-p',", "{}\".format(self.initial_node)) connect = self.loop.create_connection( lambda: DHTClientProtocol( self.self_key, self.bucket_tree, self.value_store, self.listen_port), self.initial_node[0], int(self.initial_node[1]) )", "import settings import string from collections import deque from dht.node import SelfNode from", "from dht.node import SelfNode from dht.protocol import DHTServerProtocol, DHTClientProtocol from dht.routing import BucketTree", "hex(random.randrange(bucket_node_range[0], bucket_node_range[1]))[2:] to_check.appendleft(key) async def connect_to_unconnected_nodes(self): while True: await asyncio.sleep(1) for node in", "logging.info('Setting logging level to info') if args.initial_node is not None: initial_node = tuple(args.initial_node.split(\":\"))", "<filename>dht/main.py import argparse import asyncio import importlib import logging import random import settings", "if args.verbose_debug: logging.basicConfig(level=logging.DEBUG) logging.debug('Setting logging level to debug') if args.verbose_info: logging.basicConfig(level=logging.INFO) logging.info('Setting logging", "identify ourselves. \"\"\" key = hash_string( ''.join([random.choice(string.ascii_letters) for _ in range(160)])) logging.info(\"Our key", "\"\"\" Connect to the initial node if one is known. 
\"\"\" logging.info(\"Connecting to", "refresh_nodes(self, key=None, wait=None): to_check = deque([key]) while True: if wait is None: wait", "= listen_port logging.info(\"Listening on {}\".format(self.listen_port)) self.value_store = self.create_value_store() self.self_key = self.create_self_key() self.self_node =", "deque from dht.node import SelfNode from dht.protocol import DHTServerProtocol, DHTClientProtocol from dht.routing import", "= getattr(module, 'MemoryStore') return value_store_class() def create_self_key(self): \"\"\" Create a key with which", "_ in range(160)])) logging.info(\"Our key is {}\".format(key)) return key def create_self_node(self): \"\"\" Create", "def connect_to_unconnected_nodes(self): while True: await asyncio.sleep(1) for node in self.bucket_tree.get_unconnected_nodes(): logging.debug(node) connect =", "nodes: if node == self.self_node or node.protocol is None: continue node.protocol.find_node(search_key) except IndexError:", "try: self.loop.run_forever() except KeyboardInterrupt: pass self.loop.close() if __name__ == \"__main__\": parser = argparse.ArgumentParser(description='A", "self.self_node = self.create_self_node() self.bucket_tree = self.create_bucket_tree() self.loop = asyncio.get_event_loop() self.create_server() if self.initial_node is", "nodes = self.bucket_tree.find_nodes(search_key) for node in nodes: if node == self.self_node or node.protocol", "to_check.appendleft(key) async def connect_to_unconnected_nodes(self): while True: await asyncio.sleep(1) for node in self.bucket_tree.get_unconnected_nodes(): logging.debug(node)", "key = hash_string( ''.join([random.choice(string.ascii_letters) for _ in range(160)])) logging.info(\"Our key is {}\".format(key)) return", "\"\"\" key = hash_string( ''.join([random.choice(string.ascii_letters) for _ in range(160)])) logging.info(\"Our key is {}\".format(key))", "to initial node: {}\".format(self.initial_node)) connect = self.loop.create_connection( lambda: DHTClientProtocol( self.self_key, self.bucket_tree, self.value_store, self.listen_port),", "store Nodes. \"\"\" tree = BucketTree(self.self_node) return tree def create_server(self): \"\"\" Create the", "bucket_node_range = bucket_node.get_range() key = hex(random.randrange(bucket_node_range[0], bucket_node_range[1]))[2:] to_check.appendleft(key) async def connect_to_unconnected_nodes(self): while True:", "in nodes: if node == self.self_node or node.protocol is None: continue node.protocol.find_node(search_key) except", "loop to start everything. \"\"\" try: self.loop.run_forever() except KeyboardInterrupt: pass self.loop.close() if __name__", "to_check = deque([key]) while True: if wait is None: wait = 3 #", "import hash_string class DHT: def __init__(self, listen_port, initial_node=None): self.initial_node = initial_node self.listen_port =", "to_check.pop() nodes = self.bucket_tree.find_nodes(search_key) for node in nodes: if node == self.self_node or", "connect to (1.2.3.4:5678).') parser.add_argument( '--listen-port', '-p', default=9999, help='The port to listen on.') parser.add_argument('-v',", "while True: await asyncio.sleep(1) for node in self.bucket_tree.get_unconnected_nodes(): logging.debug(node) connect = self.loop.create_connection( lambda:", "we will identify ourselves. 
\"\"\" key = hash_string( ''.join([random.choice(string.ascii_letters) for _ in range(160)]))", "for _ in range(160)])) logging.info(\"Our key is {}\".format(key)) return key def create_self_node(self): \"\"\"", "not None: self.connect_to_initial_node() self.loop.create_task(self.refresh_nodes(key=self.self_key)) self.loop.create_task(self.connect_to_unconnected_nodes()) def create_value_store(self): \"\"\" Create a Store to store", "self.listen_port ) self.loop.run_until_complete(listen) def connect_to_initial_node(self): \"\"\" Connect to the initial node if one", "protocol = await connect node.protocol = protocol protocol.node = node def run(self): \"\"\"", "args.verbose_info: logging.basicConfig(level=logging.INFO) logging.info('Setting logging level to info') if args.initial_node is not None: initial_node", "to connect to (1.2.3.4:5678).') parser.add_argument( '--listen-port', '-p', default=9999, help='The port to listen on.')", "DHTServerProtocol, DHTClientProtocol from dht.routing import BucketTree from dht.utils import hash_string class DHT: def", "dest='verbose_debug', help='More verbose') args = parser.parse_args() if args.verbose_debug: logging.basicConfig(level=logging.DEBUG) logging.debug('Setting logging level to", "= protocol protocol.node = node def run(self): \"\"\" Run the loop to start", "DHT') parser.add_argument( '--initial-node', '-n', help='The initial node to connect to (1.2.3.4:5678).') parser.add_argument( '--listen-port',", "self.bucket_tree.get_unconnected_nodes(): logging.debug(node) connect = self.loop.create_connection( lambda: DHTClientProtocol( self.self_key, self.bucket_tree, self.value_store, self.listen_port), node.address, int(node.port)", "self.loop.create_connection( lambda: DHTClientProtocol( self.self_key, self.bucket_tree, self.value_store, self.listen_port), self.initial_node[0], int(self.initial_node[1]) ) self.loop.run_until_complete(connect) async def", "initial node if one is known. \"\"\" logging.info(\"Connecting to initial node: {}\".format(self.initial_node)) connect", "None: initial_node = tuple(args.initial_node.split(\":\")) else: initial_node = None dht = DHT(args.listen_port, initial_node) dht.run()", "True: await asyncio.sleep(1) for node in self.bucket_tree.get_unconnected_nodes(): logging.debug(node) connect = self.loop.create_connection( lambda: DHTClientProtocol(", "= importlib.import_module('value_stores.' 
+ settings.VALUE_STORE) value_store_class = getattr(module, 'MemoryStore') return value_store_class() def create_self_key(self): \"\"\"", "debug') if args.verbose_info: logging.basicConfig(level=logging.INFO) logging.info('Setting logging level to info') if args.initial_node is not", "if __name__ == \"__main__\": parser = argparse.ArgumentParser(description='A python DHT') parser.add_argument( '--initial-node', '-n', help='The", "= self.create_bucket_tree() self.loop = asyncio.get_event_loop() self.create_server() if self.initial_node is not None: self.connect_to_initial_node() self.loop.create_task(self.refresh_nodes(key=self.self_key))", "on {}\".format(self.listen_port)) self.value_store = self.create_value_store() self.self_key = self.create_self_key() self.self_node = self.create_self_node() self.bucket_tree =", "key is {}\".format(key)) return key def create_self_node(self): \"\"\" Create a Node to represent", "\"\"\" listen = self.loop.create_server( lambda: DHTServerProtocol( self.self_key, self.bucket_tree, self.value_store, self.listen_port), '0.0.0.0', self.listen_port )", "self.connect_to_initial_node() self.loop.create_task(self.refresh_nodes(key=self.self_key)) self.loop.create_task(self.connect_to_unconnected_nodes()) def create_value_store(self): \"\"\" Create a Store to store values in.", "in self.bucket_tree.get_unconnected_nodes(): logging.debug(node) connect = self.loop.create_connection( lambda: DHTClientProtocol( self.self_key, self.bucket_tree, self.value_store, self.listen_port), node.address,", "argparse import asyncio import importlib import logging import random import settings import string", "wait is None: wait = 3 # TODO maximum wait in the settings", "search_key = to_check.pop() nodes = self.bucket_tree.find_nodes(search_key) for node in nodes: if node ==", "Node to represent ourselves. 
\"\"\" self_node = SelfNode(key=self.self_key) return self_node def create_bucket_tree(self): \"\"\"", "dht.node import SelfNode from dht.protocol import DHTServerProtocol, DHTClientProtocol from dht.routing import BucketTree from", "initial node: {}\".format(self.initial_node)) connect = self.loop.create_connection( lambda: DHTClientProtocol( self.self_key, self.bucket_tree, self.value_store, self.listen_port), self.initial_node[0],", "bucket_node_range[1]))[2:] to_check.appendleft(key) async def connect_to_unconnected_nodes(self): while True: await asyncio.sleep(1) for node in self.bucket_tree.get_unconnected_nodes():", "connect = self.loop.create_connection( lambda: DHTClientProtocol( self.self_key, self.bucket_tree, self.value_store, self.listen_port), node.address, int(node.port) ) _,", "{:d} seconds\".format(wait)) await asyncio.sleep(wait) try: while True: search_key = to_check.pop() nodes = self.bucket_tree.find_nodes(search_key)", "node to connect to (1.2.3.4:5678).') parser.add_argument( '--listen-port', '-p', default=9999, help='The port to listen", "+ settings.VALUE_STORE) value_store_class = getattr(module, 'MemoryStore') return value_store_class() def create_self_key(self): \"\"\" Create a", "sleeping {:d} seconds\".format(wait)) await asyncio.sleep(wait) try: while True: search_key = to_check.pop() nodes =", "self.create_server() if self.initial_node is not None: self.connect_to_initial_node() self.loop.create_task(self.refresh_nodes(key=self.self_key)) self.loop.create_task(self.connect_to_unconnected_nodes()) def create_value_store(self): \"\"\" Create", "action='store_true', dest='verbose_info', help='Verbose') parser.add_argument('-vv', action='store_true', dest='verbose_debug', help='More verbose') args = parser.parse_args() if args.verbose_debug:", "from dht.protocol import DHTServerProtocol, DHTClientProtocol from dht.routing import BucketTree from dht.utils import hash_string", "self.initial_node is not None: self.connect_to_initial_node() self.loop.create_task(self.refresh_nodes(key=self.self_key)) self.loop.create_task(self.connect_to_unconnected_nodes()) def create_value_store(self): \"\"\" Create a Store", "value_store_class() def create_self_key(self): \"\"\" Create a key with which we will identify ourselves.", "asyncio.sleep(wait) try: while True: search_key = to_check.pop() nodes = self.bucket_tree.find_nodes(search_key) for node in", "wait = 3 # TODO maximum wait in the settings wait = min(wait", "Create a Node to represent ourselves. \"\"\" self_node = SelfNode(key=self.self_key) return self_node def", "self.loop = asyncio.get_event_loop() self.create_server() if self.initial_node is not None: self.connect_to_initial_node() self.loop.create_task(self.refresh_nodes(key=self.self_key)) self.loop.create_task(self.connect_to_unconnected_nodes()) def", "protocol protocol.node = node def run(self): \"\"\" Run the loop to start everything.", "for incoming connections. \"\"\" listen = self.loop.create_server( lambda: DHTServerProtocol( self.self_key, self.bucket_tree, self.value_store, self.listen_port),", "= 3 # TODO maximum wait in the settings wait = min(wait *", "is {}\".format(key)) return key def create_self_node(self): \"\"\" Create a Node to represent ourselves.", "\"\"\" module = importlib.import_module('value_stores.' + settings.VALUE_STORE) value_store_class = getattr(module, 'MemoryStore') return value_store_class() def", "to represent ourselves. 
\"\"\" self_node = SelfNode(key=self.self_key) return self_node def create_bucket_tree(self): \"\"\" Create", "class DHT: def __init__(self, listen_port, initial_node=None): self.initial_node = initial_node self.listen_port = listen_port logging.info(\"Listening", "node in self.bucket_tree.get_unconnected_nodes(): logging.debug(node) connect = self.loop.create_connection( lambda: DHTClientProtocol( self.self_key, self.bucket_tree, self.value_store, self.listen_port),", "importlib import logging import random import settings import string from collections import deque", "node if one is known. \"\"\" logging.info(\"Connecting to initial node: {}\".format(self.initial_node)) connect =", "DHT: def __init__(self, listen_port, initial_node=None): self.initial_node = initial_node self.listen_port = listen_port logging.info(\"Listening on", "'-n', help='The initial node to connect to (1.2.3.4:5678).') parser.add_argument( '--listen-port', '-p', default=9999, help='The", "initial_node=None): self.initial_node = initial_node self.listen_port = listen_port logging.info(\"Listening on {}\".format(self.listen_port)) self.value_store = self.create_value_store()", ") self.loop.run_until_complete(connect) async def refresh_nodes(self, key=None, wait=None): to_check = deque([key]) while True: if", "= BucketTree(self.self_node) return tree def create_server(self): \"\"\" Create the server to listen for", "listen_port, initial_node=None): self.initial_node = initial_node self.listen_port = listen_port logging.info(\"Listening on {}\".format(self.listen_port)) self.value_store =", "self.loop.run_until_complete(listen) def connect_to_initial_node(self): \"\"\" Connect to the initial node if one is known.", "await asyncio.sleep(wait) try: while True: search_key = to_check.pop() nodes = self.bucket_tree.find_nodes(search_key) for node", "key=None, wait=None): to_check = deque([key]) while True: if wait is None: wait =", "random import settings import string from collections import deque from dht.node import SelfNode", "from collections import deque from dht.node import SelfNode from dht.protocol import DHTServerProtocol, DHTClientProtocol", "logging.debug(\"refresh_node sleeping {:d} seconds\".format(wait)) await asyncio.sleep(wait) try: while True: search_key = to_check.pop() nodes", "parser.add_argument( '--listen-port', '-p', default=9999, help='The port to listen on.') parser.add_argument('-v', action='store_true', dest='verbose_info', help='Verbose')", "except IndexError: for bucket_node in self.bucket_tree.get_leaf_bucket_nodes(include_self=False): bucket_node_range = bucket_node.get_range() key = hex(random.randrange(bucket_node_range[0], bucket_node_range[1]))[2:]", "on.') parser.add_argument('-v', action='store_true', dest='verbose_info', help='Verbose') parser.add_argument('-vv', action='store_true', dest='verbose_debug', help='More verbose') args = parser.parse_args()", "DHTClientProtocol( self.self_key, self.bucket_tree, self.value_store, self.listen_port), self.initial_node[0], int(self.initial_node[1]) ) self.loop.run_until_complete(connect) async def refresh_nodes(self, key=None,", "collections import deque from dht.node import SelfNode from dht.protocol import DHTServerProtocol, DHTClientProtocol from", "self.self_key, self.bucket_tree, self.value_store, self.listen_port), node.address, int(node.port) ) _, protocol = await connect node.protocol", "\"\"\" self_node = SelfNode(key=self.self_key) return self_node def create_bucket_tree(self): \"\"\" Create the BucketTree to", "self.value_store, 
self.listen_port), self.initial_node[0], int(self.initial_node[1]) ) self.loop.run_until_complete(connect) async def refresh_nodes(self, key=None, wait=None): to_check =", "one is known. \"\"\" logging.info(\"Connecting to initial node: {}\".format(self.initial_node)) connect = self.loop.create_connection( lambda:", "listen on.') parser.add_argument('-v', action='store_true', dest='verbose_info', help='Verbose') parser.add_argument('-vv', action='store_true', dest='verbose_debug', help='More verbose') args =", "'--listen-port', '-p', default=9999, help='The port to listen on.') parser.add_argument('-v', action='store_true', dest='verbose_info', help='Verbose') parser.add_argument('-vv',", "DHTClientProtocol( self.self_key, self.bucket_tree, self.value_store, self.listen_port), node.address, int(node.port) ) _, protocol = await connect", "logging.basicConfig(level=logging.DEBUG) logging.debug('Setting logging level to debug') if args.verbose_info: logging.basicConfig(level=logging.INFO) logging.info('Setting logging level to", "incoming connections. \"\"\" listen = self.loop.create_server( lambda: DHTServerProtocol( self.self_key, self.bucket_tree, self.value_store, self.listen_port), '0.0.0.0',", "create_self_key(self): \"\"\" Create a key with which we will identify ourselves. \"\"\" key", "hash_string class DHT: def __init__(self, listen_port, initial_node=None): self.initial_node = initial_node self.listen_port = listen_port", "will identify ourselves. \"\"\" key = hash_string( ''.join([random.choice(string.ascii_letters) for _ in range(160)])) logging.info(\"Our", "node in nodes: if node == self.self_node or node.protocol is None: continue node.protocol.find_node(search_key)", "def __init__(self, listen_port, initial_node=None): self.initial_node = initial_node self.listen_port = listen_port logging.info(\"Listening on {}\".format(self.listen_port))", "a Node to represent ourselves. 
\"\"\" self_node = SelfNode(key=self.self_key) return self_node def create_bucket_tree(self):", "self.self_node or node.protocol is None: continue node.protocol.find_node(search_key) except IndexError: for bucket_node in self.bucket_tree.get_leaf_bucket_nodes(include_self=False):", "wait in the settings wait = min(wait * 2, 30) logging.debug(\"refresh_node sleeping {:d}", "args = parser.parse_args() if args.verbose_debug: logging.basicConfig(level=logging.DEBUG) logging.debug('Setting logging level to debug') if args.verbose_info:", "self.bucket_tree, self.value_store, self.listen_port), '0.0.0.0', self.listen_port ) self.loop.run_until_complete(listen) def connect_to_initial_node(self): \"\"\" Connect to the", "help='The port to listen on.') parser.add_argument('-v', action='store_true', dest='verbose_info', help='Verbose') parser.add_argument('-vv', action='store_true', dest='verbose_debug', help='More", "dht.utils import hash_string class DHT: def __init__(self, listen_port, initial_node=None): self.initial_node = initial_node self.listen_port", "node: {}\".format(self.initial_node)) connect = self.loop.create_connection( lambda: DHTClientProtocol( self.self_key, self.bucket_tree, self.value_store, self.listen_port), self.initial_node[0], int(self.initial_node[1])", "(1.2.3.4:5678).') parser.add_argument( '--listen-port', '-p', default=9999, help='The port to listen on.') parser.add_argument('-v', action='store_true', dest='verbose_info',", "= self.create_value_store() self.self_key = self.create_self_key() self.self_node = self.create_self_node() self.bucket_tree = self.create_bucket_tree() self.loop =", "= asyncio.get_event_loop() self.create_server() if self.initial_node is not None: self.connect_to_initial_node() self.loop.create_task(self.refresh_nodes(key=self.self_key)) self.loop.create_task(self.connect_to_unconnected_nodes()) def create_value_store(self):", "with which we will identify ourselves. \"\"\" key = hash_string( ''.join([random.choice(string.ascii_letters) for _", "self.loop.run_until_complete(connect) async def refresh_nodes(self, key=None, wait=None): to_check = deque([key]) while True: if wait", "listen_port logging.info(\"Listening on {}\".format(self.listen_port)) self.value_store = self.create_value_store() self.self_key = self.create_self_key() self.self_node = self.create_self_node()", "if wait is None: wait = 3 # TODO maximum wait in the", "Run the loop to start everything. \"\"\" try: self.loop.run_forever() except KeyboardInterrupt: pass self.loop.close()", "to start everything. \"\"\" try: self.loop.run_forever() except KeyboardInterrupt: pass self.loop.close() if __name__ ==", "if one is known. \"\"\" logging.info(\"Connecting to initial node: {}\".format(self.initial_node)) connect = self.loop.create_connection(", "help='The initial node to connect to (1.2.3.4:5678).') parser.add_argument( '--listen-port', '-p', default=9999, help='The port", "maximum wait in the settings wait = min(wait * 2, 30) logging.debug(\"refresh_node sleeping", "= hash_string( ''.join([random.choice(string.ascii_letters) for _ in range(160)])) logging.info(\"Our key is {}\".format(key)) return key", "to the initial node if one is known. 
\"\"\" logging.info(\"Connecting to initial node:", "self.initial_node = initial_node self.listen_port = listen_port logging.info(\"Listening on {}\".format(self.listen_port)) self.value_store = self.create_value_store() self.self_key", "logging.debug('Setting logging level to debug') if args.verbose_info: logging.basicConfig(level=logging.INFO) logging.info('Setting logging level to info')", "store values in. \"\"\" module = importlib.import_module('value_stores.' + settings.VALUE_STORE) value_store_class = getattr(module, 'MemoryStore')", "def connect_to_initial_node(self): \"\"\" Connect to the initial node if one is known. \"\"\"", "lambda: DHTServerProtocol( self.self_key, self.bucket_tree, self.value_store, self.listen_port), '0.0.0.0', self.listen_port ) self.loop.run_until_complete(listen) def connect_to_initial_node(self): \"\"\"", "wait=None): to_check = deque([key]) while True: if wait is None: wait = 3", "logging level to info') if args.initial_node is not None: initial_node = tuple(args.initial_node.split(\":\")) else:", "import deque from dht.node import SelfNode from dht.protocol import DHTServerProtocol, DHTClientProtocol from dht.routing", "range(160)])) logging.info(\"Our key is {}\".format(key)) return key def create_self_node(self): \"\"\" Create a Node", "except KeyboardInterrupt: pass self.loop.close() if __name__ == \"__main__\": parser = argparse.ArgumentParser(description='A python DHT')", "create_value_store(self): \"\"\" Create a Store to store values in. \"\"\" module = importlib.import_module('value_stores.'", "def create_bucket_tree(self): \"\"\" Create the BucketTree to store Nodes. \"\"\" tree = BucketTree(self.self_node)", "\"\"\" Create the BucketTree to store Nodes. \"\"\" tree = BucketTree(self.self_node) return tree", "listen for incoming connections. \"\"\" listen = self.loop.create_server( lambda: DHTServerProtocol( self.self_key, self.bucket_tree, self.value_store,", "DHTClientProtocol from dht.routing import BucketTree from dht.utils import hash_string class DHT: def __init__(self,", "KeyboardInterrupt: pass self.loop.close() if __name__ == \"__main__\": parser = argparse.ArgumentParser(description='A python DHT') parser.add_argument(", "logging level to debug') if args.verbose_info: logging.basicConfig(level=logging.INFO) logging.info('Setting logging level to info') if", "asyncio import importlib import logging import random import settings import string from collections", "import string from collections import deque from dht.node import SelfNode from dht.protocol import", "connect_to_initial_node(self): \"\"\" Connect to the initial node if one is known. \"\"\" logging.info(\"Connecting", "connections. \"\"\" listen = self.loop.create_server( lambda: DHTServerProtocol( self.self_key, self.bucket_tree, self.value_store, self.listen_port), '0.0.0.0', self.listen_port", "self.initial_node[0], int(self.initial_node[1]) ) self.loop.run_until_complete(connect) async def refresh_nodes(self, key=None, wait=None): to_check = deque([key]) while", "everything. 
\"\"\" try: self.loop.run_forever() except KeyboardInterrupt: pass self.loop.close() if __name__ == \"__main__\": parser", "self.loop.close() if __name__ == \"__main__\": parser = argparse.ArgumentParser(description='A python DHT') parser.add_argument( '--initial-node', '-n',", "'MemoryStore') return value_store_class() def create_self_key(self): \"\"\" Create a key with which we will", "int(self.initial_node[1]) ) self.loop.run_until_complete(connect) async def refresh_nodes(self, key=None, wait=None): to_check = deque([key]) while True:", "logging.debug(node) connect = self.loop.create_connection( lambda: DHTClientProtocol( self.self_key, self.bucket_tree, self.value_store, self.listen_port), node.address, int(node.port) )", "dest='verbose_info', help='Verbose') parser.add_argument('-vv', action='store_true', dest='verbose_debug', help='More verbose') args = parser.parse_args() if args.verbose_debug: logging.basicConfig(level=logging.DEBUG)", "= hex(random.randrange(bucket_node_range[0], bucket_node_range[1]))[2:] to_check.appendleft(key) async def connect_to_unconnected_nodes(self): while True: await asyncio.sleep(1) for node", "node.address, int(node.port) ) _, protocol = await connect node.protocol = protocol protocol.node =", "in. \"\"\" module = importlib.import_module('value_stores.' + settings.VALUE_STORE) value_store_class = getattr(module, 'MemoryStore') return value_store_class()", "async def refresh_nodes(self, key=None, wait=None): to_check = deque([key]) while True: if wait is", "while True: if wait is None: wait = 3 # TODO maximum wait", "= self.create_self_key() self.self_node = self.create_self_node() self.bucket_tree = self.create_bucket_tree() self.loop = asyncio.get_event_loop() self.create_server() if", "return value_store_class() def create_self_key(self): \"\"\" Create a key with which we will identify", "for bucket_node in self.bucket_tree.get_leaf_bucket_nodes(include_self=False): bucket_node_range = bucket_node.get_range() key = hex(random.randrange(bucket_node_range[0], bucket_node_range[1]))[2:] to_check.appendleft(key) async", "while True: search_key = to_check.pop() nodes = self.bucket_tree.find_nodes(search_key) for node in nodes: if", "bucket_node.get_range() key = hex(random.randrange(bucket_node_range[0], bucket_node_range[1]))[2:] to_check.appendleft(key) async def connect_to_unconnected_nodes(self): while True: await asyncio.sleep(1)", "\"\"\" try: self.loop.run_forever() except KeyboardInterrupt: pass self.loop.close() if __name__ == \"__main__\": parser =", "None: continue node.protocol.find_node(search_key) except IndexError: for bucket_node in self.bucket_tree.get_leaf_bucket_nodes(include_self=False): bucket_node_range = bucket_node.get_range() key", "from dht.routing import BucketTree from dht.utils import hash_string class DHT: def __init__(self, listen_port,", "asyncio.sleep(1) for node in self.bucket_tree.get_unconnected_nodes(): logging.debug(node) connect = self.loop.create_connection( lambda: DHTClientProtocol( self.self_key, self.bucket_tree,", "\"\"\" Create the server to listen for incoming connections. \"\"\" listen = self.loop.create_server(", "self.loop.create_connection( lambda: DHTClientProtocol( self.self_key, self.bucket_tree, self.value_store, self.listen_port), node.address, int(node.port) ) _, protocol =", "initial node to connect to (1.2.3.4:5678).') parser.add_argument( '--listen-port', '-p', default=9999, help='The port to", "ourselves. 
\"\"\" key = hash_string( ''.join([random.choice(string.ascii_letters) for _ in range(160)])) logging.info(\"Our key is", "self.create_self_key() self.self_node = self.create_self_node() self.bucket_tree = self.create_bucket_tree() self.loop = asyncio.get_event_loop() self.create_server() if self.initial_node", "\"\"\" Create a key with which we will identify ourselves. \"\"\" key =", "== self.self_node or node.protocol is None: continue node.protocol.find_node(search_key) except IndexError: for bucket_node in", "= self.loop.create_server( lambda: DHTServerProtocol( self.self_key, self.bucket_tree, self.value_store, self.listen_port), '0.0.0.0', self.listen_port ) self.loop.run_until_complete(listen) def", "in the settings wait = min(wait * 2, 30) logging.debug(\"refresh_node sleeping {:d} seconds\".format(wait))", "TODO maximum wait in the settings wait = min(wait * 2, 30) logging.debug(\"refresh_node", "''.join([random.choice(string.ascii_letters) for _ in range(160)])) logging.info(\"Our key is {}\".format(key)) return key def create_self_node(self):", "def create_server(self): \"\"\" Create the server to listen for incoming connections. \"\"\" listen", "node.protocol.find_node(search_key) except IndexError: for bucket_node in self.bucket_tree.get_leaf_bucket_nodes(include_self=False): bucket_node_range = bucket_node.get_range() key = hex(random.randrange(bucket_node_range[0],", "key = hex(random.randrange(bucket_node_range[0], bucket_node_range[1]))[2:] to_check.appendleft(key) async def connect_to_unconnected_nodes(self): while True: await asyncio.sleep(1) for", "value_store_class = getattr(module, 'MemoryStore') return value_store_class() def create_self_key(self): \"\"\" Create a key with", "listen = self.loop.create_server( lambda: DHTServerProtocol( self.self_key, self.bucket_tree, self.value_store, self.listen_port), '0.0.0.0', self.listen_port ) self.loop.run_until_complete(listen)", "2, 30) logging.debug(\"refresh_node sleeping {:d} seconds\".format(wait)) await asyncio.sleep(wait) try: while True: search_key =", "is None: continue node.protocol.find_node(search_key) except IndexError: for bucket_node in self.bucket_tree.get_leaf_bucket_nodes(include_self=False): bucket_node_range = bucket_node.get_range()", "\"\"\" Run the loop to start everything. \"\"\" try: self.loop.run_forever() except KeyboardInterrupt: pass", "to (1.2.3.4:5678).') parser.add_argument( '--listen-port', '-p', default=9999, help='The port to listen on.') parser.add_argument('-v', action='store_true',", "self.bucket_tree, self.value_store, self.listen_port), node.address, int(node.port) ) _, protocol = await connect node.protocol =", "importlib.import_module('value_stores.' 
+ settings.VALUE_STORE) value_store_class = getattr(module, 'MemoryStore') return value_store_class() def create_self_key(self): \"\"\" Create", "\"\"\" tree = BucketTree(self.self_node) return tree def create_server(self): \"\"\" Create the server to", "args.initial_node is not None: initial_node = tuple(args.initial_node.split(\":\")) else: initial_node = None dht =", "try: while True: search_key = to_check.pop() nodes = self.bucket_tree.find_nodes(search_key) for node in nodes:", "info') if args.initial_node is not None: initial_node = tuple(args.initial_node.split(\":\")) else: initial_node = None", "{}\".format(self.listen_port)) self.value_store = self.create_value_store() self.self_key = self.create_self_key() self.self_node = self.create_self_node() self.bucket_tree = self.create_bucket_tree()", "= to_check.pop() nodes = self.bucket_tree.find_nodes(search_key) for node in nodes: if node == self.self_node", "= argparse.ArgumentParser(description='A python DHT') parser.add_argument( '--initial-node', '-n', help='The initial node to connect to", "== \"__main__\": parser = argparse.ArgumentParser(description='A python DHT') parser.add_argument( '--initial-node', '-n', help='The initial node", "self.self_key = self.create_self_key() self.self_node = self.create_self_node() self.bucket_tree = self.create_bucket_tree() self.loop = asyncio.get_event_loop() self.create_server()", "self.self_key, self.bucket_tree, self.value_store, self.listen_port), self.initial_node[0], int(self.initial_node[1]) ) self.loop.run_until_complete(connect) async def refresh_nodes(self, key=None, wait=None):", "action='store_true', dest='verbose_debug', help='More verbose') args = parser.parse_args() if args.verbose_debug: logging.basicConfig(level=logging.DEBUG) logging.debug('Setting logging level", "lambda: DHTClientProtocol( self.self_key, self.bucket_tree, self.value_store, self.listen_port), node.address, int(node.port) ) _, protocol = await", "self.listen_port), '0.0.0.0', self.listen_port ) self.loop.run_until_complete(listen) def connect_to_initial_node(self): \"\"\" Connect to the initial node", "\"\"\" Create a Node to represent ourselves. \"\"\" self_node = SelfNode(key=self.self_key) return self_node", "= deque([key]) while True: if wait is None: wait = 3 # TODO", "await connect node.protocol = protocol protocol.node = node def run(self): \"\"\" Run the", "self.self_key, self.bucket_tree, self.value_store, self.listen_port), '0.0.0.0', self.listen_port ) self.loop.run_until_complete(listen) def connect_to_initial_node(self): \"\"\" Connect to", "server to listen for incoming connections. \"\"\" listen = self.loop.create_server( lambda: DHTServerProtocol( self.self_key,", "create_self_node(self): \"\"\" Create a Node to represent ourselves. \"\"\" self_node = SelfNode(key=self.self_key) return", "tree def create_server(self): \"\"\" Create the server to listen for incoming connections. 
\"\"\"", "logging.basicConfig(level=logging.INFO) logging.info('Setting logging level to info') if args.initial_node is not None: initial_node =", "python DHT') parser.add_argument( '--initial-node', '-n', help='The initial node to connect to (1.2.3.4:5678).') parser.add_argument(", "min(wait * 2, 30) logging.debug(\"refresh_node sleeping {:d} seconds\".format(wait)) await asyncio.sleep(wait) try: while True:", "parser.add_argument('-v', action='store_true', dest='verbose_info', help='Verbose') parser.add_argument('-vv', action='store_true', dest='verbose_debug', help='More verbose') args = parser.parse_args() if", "logging.info(\"Connecting to initial node: {}\".format(self.initial_node)) connect = self.loop.create_connection( lambda: DHTClientProtocol( self.self_key, self.bucket_tree, self.value_store,", "SelfNode from dht.protocol import DHTServerProtocol, DHTClientProtocol from dht.routing import BucketTree from dht.utils import", "self.loop.create_task(self.connect_to_unconnected_nodes()) def create_value_store(self): \"\"\" Create a Store to store values in. \"\"\" module", "await asyncio.sleep(1) for node in self.bucket_tree.get_unconnected_nodes(): logging.debug(node) connect = self.loop.create_connection( lambda: DHTClientProtocol( self.self_key,", "the initial node if one is known. \"\"\" logging.info(\"Connecting to initial node: {}\".format(self.initial_node))", "help='More verbose') args = parser.parse_args() if args.verbose_debug: logging.basicConfig(level=logging.DEBUG) logging.debug('Setting logging level to debug')" ]
[ "to check and find files from UNIX-based systems This should be syncronized for", "csv_flag == False: check_ending_format(transcript_path, 'txt') transcript_data = pd.read_csv(transcript_path, sep=\"\\t\", header=None, index_col=False) path_list =", "= \"-{:d}_{:02d}\".format(datetime_object.hour, datetime_object.minute) if kwargs['time_format']: if kwargs['txt_flag']: full_output_csv_path = '/'.join(csv_path.split('/')[0:-1]) + '/' +", "input_transcript_path = all_texts[0] print(input_transcript_path) print(\"\\n\") return input_transcript_path def get_pathsList_from_transcript(transcript_path, csv_flag=False): \"\"\" Given a", "time_f + '.txt' my_df.to_csv(full_output_csv_path, header=False, sep='\\t', index=False) else: full_output_csv_path = '/'.join(csv_path.split('/')[0:-1]) + '/'", "columns_values = kwargs['cols'] # check if csv file exists check_csv_exists(csv_path) if len(args) >", "all_texts = glob.glob(\"{}/*.{}\".format(src_dir, obj_format)) if len(all_texts) != 1: print(\"Too many transcripts! Please use", "names) kwargs: - cols: List of names for columns (matched to args) -", "= current_list idx = idx + 1 today_date = '_' + str(datetime.date.today()) datetime_object", "path_list def get_list_of_GT(folder_path, csv_flag=False, single_col=False): transcript_path = locate_single_txt(folder_path) if csv_flag == False: check_ending_format(transcript_path,", "list2): if len(list1) != len(list2): print(\"Error, your list1 and list2 have different lengths\")", "pd from os.path import join as pj from os.path import exists import collections", "column To-do: check if in csv file the header_none will throw count the", "check_ending_format(current_file_path, ending_format_substring): if current_file_path[-3:] != ending_format_substring: print(\"Error in the format, must end with", "audio_extension = 'wav', confirm_with_transcript = True, verbose = False): \"\"\" Confirm_with_transcript requires: file", "csv_path.split('/')[-1][0:-4] + '.txt' my_df.to_csv(full_output_csv_path, header=False, sep='\\t', index=False) else: full_output_csv_path = '/'.join(csv_path.split('/')[0:-1]) + '/'", "single_col: GT_list = transcript_data[0].tolist() else: GT_list = transcript_data[1].tolist() return GT_list def get_list_of_audios(folder_path, audio_extension", "with open(log_file, mode) as f: f.write(msg) #If {both} is true, print to terminal", "= { 'time_format': True, 'txt_flag': False } kwargs = { **defaultKwargs, **kwargs }", "= '/'.join(csv_path.split('/')[0:-1]) + '/' + csv_path.split('/')[-1][0:-4] + today_date + time_f + '.txt' my_df.to_csv(full_output_csv_path,", "processes # # # def locate_single_txt(src_dir, obj_format = 'txt'): \"\"\" Input: - Folder", "= transcript_data[0].tolist() return path_list def get_list_of_GT(folder_path, csv_flag=False, single_col=False): transcript_path = locate_single_txt(folder_path) if csv_flag", "TXT and CSV # # # def log_message(msg, log_file, mode, both=True): '''Function that", "'.csv' my_df.to_csv(full_output_csv_path, index=False) else: if kwargs['txt_flag']: full_output_csv_path = '/'.join(csv_path.split('/')[0:-1]) + '/' + csv_path.split('/')[-1][0:-4]", "1\") else: input_transcript_path = all_texts[0] print(input_transcript_path) print(\"\\n\") return input_transcript_path def get_pathsList_from_transcript(transcript_path, csv_flag=False): \"\"\"", "header_none will throw count the header as the first row. 
To-do: Add a", "'/' + csv_path.split('/')[-1][0:-4] + '.txt' my_df.to_csv(full_output_csv_path, header=False, sep='\\t', index=False) else: full_output_csv_path = '/'.join(csv_path.split('/')[0:-1])", "+ today_date + time_f + '.csv' my_df.to_csv(full_output_csv_path, index=False) else: if kwargs['txt_flag']: full_output_csv_path =", "print(\"\\n\") return input_transcript_path def get_pathsList_from_transcript(transcript_path, csv_flag=False): \"\"\" Given a transcript in txt (or", "print(\"Error in the format, must end with {}!\".format(ending_format_substring)) sys.exit() def check_same_length(list1, list2): if", "write_my_csv(*args, **kwargs): \"\"\" Function to write csv files. args: - Columns for the", "collections.Counter(transcript_lists): if verbose: print(\"Transcript and audios are consistent\") else: print(\"Transcript paths and names", "functions to check and find files from UNIX-based systems This should be syncronized", "the names) kwargs: - cols: List of names for columns (matched to args)", "as f: f.write(msg) #If {both} is true, print to terminal as well if", "if kwargs['txt_flag']: full_output_csv_path = '/'.join(csv_path.split('/')[0:-1]) + '/' + csv_path.split('/')[-1][0:-4] + '.txt' my_df.to_csv(full_output_csv_path, header=False,", "verbose: print(\"Transcript and audios are consistent\") else: print(\"Transcript paths and names of files", "\"\"\" Return true if folder is NOT empty \"\"\" if (len(os.listdir(current_folder_path)) != 0):", "current_file_path[-3:] != ending_format_substring: print(\"Error in the format, must end with {}!\".format(ending_format_substring)) sys.exit() def", "'/' + csv_path.split('/')[-1][0:-4] + today_date + time_f + '.txt' my_df.to_csv(full_output_csv_path, header=False, sep='\\t', index=False)", "import join as pj from os.path import exists import collections import datetime ##", "'wav', confirm_with_transcript = True, verbose = False): \"\"\" Confirm_with_transcript requires: file path in", "as the first row. To-do: Add a function to only compare the filename,", "csv_path.split('/')[-1][0:-4] + today_date + time_f + '.csv' my_df.to_csv(full_output_csv_path, index=False) else: if kwargs['txt_flag']: full_output_csv_path", "is NOT empty \"\"\" if (len(os.listdir(current_folder_path)) != 0): return True else: print(\"Folder {}", "2: check_same_length(args[0], args[1]) elif len(args) > 3: check_same_length(args[1], args[2]) idx = 0 for", "Utilities functions to check and find files from UNIX-based systems This should be", "def locate_single_txt(src_dir, obj_format = 'txt'): \"\"\" Input: - Folder path Output: - Path", "else: print(\"Folder {} is empty\".format(current_folder_path)) return False def check_ending_format(current_file_path, ending_format_substring): if current_file_path[-3:] !=", "to write csv files. 
args: - Columns for the csv (matched to the", "and CSV # # # def log_message(msg, log_file, mode, both=True): '''Function that prints", "def check_same_length(list1, list2): if len(list1) != len(list2): print(\"Error, your list1 and list2 have", "= datetime.datetime.now() time_f = \"-{:d}_{:02d}\".format(datetime_object.hour, datetime_object.minute) if kwargs['time_format']: if kwargs['txt_flag']: full_output_csv_path = '/'.join(csv_path.split('/')[0:-1])", "\"\"\" defaultKwargs = { 'time_format': True, 'txt_flag': False } kwargs = { **defaultKwargs,", "False: check_ending_format(transcript_path, 'txt') transcript_data = pd.read_csv(transcript_path, sep=\"\\t\", header=None, index_col=False) if single_col: GT_list =", "'txt_flag': False } kwargs = { **defaultKwargs, **kwargs } # my_df = pd.DataFrame(index=False)", "a transcript in txt (or csv), it returns a list of the paths", "datetime.datetime.now() time_f = \"-{:d}_{:02d}\".format(datetime_object.hour, datetime_object.minute) if kwargs['time_format']: if kwargs['txt_flag']: full_output_csv_path = '/'.join(csv_path.split('/')[0:-1]) +", "sep='\\t', index=False) else: full_output_csv_path = '/'.join(csv_path.split('/')[0:-1]) + '/' + csv_path.split('/')[-1][0:-4] + today_date +", "Given a transcript in txt (or csv), it returns a list of the", "both: print(msg, end='') def write_my_csv(*args, **kwargs): \"\"\" Function to write csv files. args:", "Compare 2 lists, full path or only names if collections.Counter(all_audios) == collections.Counter(transcript_lists): if", "if csv_flag == False: check_ending_format(transcript_path, 'txt') transcript_data = pd.read_csv(transcript_path, sep=\"\\t\", header=None, index_col=False) if", "is empty\".format(current_folder_path)) return False def check_ending_format(current_file_path, ending_format_substring): if current_file_path[-3:] != ending_format_substring: print(\"Error in", "names for columns (matched to args) - path: output_path for the csv \"\"\"", "as well if both: print(msg, end='') def write_my_csv(*args, **kwargs): \"\"\" Function to write", "args) - path: output_path for the csv \"\"\" defaultKwargs = { 'time_format': True,", "kwargs['txt_flag']: full_output_csv_path = '/'.join(csv_path.split('/')[0:-1]) + '/' + csv_path.split('/')[-1][0:-4] + today_date + time_f +", "import numpy as np import pandas as pd from os.path import join as", "transcript paths if confirm_with_transcript: transcript_path = locate_single_txt(folder_path) # Obtain list of paths from", "+ csv_path.split('/')[-1][0:-4] + today_date + time_f + '.csv' my_df.to_csv(full_output_csv_path, index=False) else: if kwargs['txt_flag']:", "single_col=False): transcript_path = locate_single_txt(folder_path) if csv_flag == False: check_ending_format(transcript_path, 'txt') transcript_data = pd.read_csv(transcript_path,", "sorted(glob.glob(\"{}/*.{}\".format(folder_path, audio_extension))) ### To double check with transcript paths if confirm_with_transcript: transcript_path =", "= kwargs['cols'] # check if csv file exists check_csv_exists(csv_path) if len(args) > 2:", "found in folder \"\"\" all_texts = glob.glob(\"{}/*.{}\".format(src_dir, obj_format)) if len(all_texts) != 1: print(\"Too", "check_ending_format(transcript_path, 'txt') transcript_data = pd.read_csv(transcript_path, sep=\"\\t\", header=None, index_col=False) if single_col: GT_list = transcript_data[0].tolist()", "Input: - Folder path Output: - Path of the only transcript found in", "To-do: Add a function to only compare the filename, not the entire path", "if 
confirm_with_transcript: transcript_path = locate_single_txt(folder_path) # Obtain list of paths from transcript transcript_lists", "numpy as np import pandas as pd from os.path import join as pj", "to log''' #Always log file with open(log_file, mode) as f: f.write(msg) #If {both}", "import pandas as pd from os.path import join as pj from os.path import", "audios \"\"\" if csv_flag == False: check_ending_format(transcript_path, 'txt') transcript_data = pd.read_csv(transcript_path, sep=\"\\t\", header=None,", "in folder \"\"\" all_texts = glob.glob(\"{}/*.{}\".format(src_dir, obj_format)) if len(all_texts) != 1: print(\"Too many", "empty \"\"\" if (len(os.listdir(current_folder_path)) != 0): return True else: print(\"Folder {} is empty\".format(current_folder_path))", "transcript_data = pd.read_csv(transcript_path, sep=\"\\t\", header=None, index_col=False) if single_col: GT_list = transcript_data[0].tolist() else: GT_list", "{ **defaultKwargs, **kwargs } # my_df = pd.DataFrame(index=False) my_df = pd.DataFrame() csv_path =", "# check if csv file exists check_csv_exists(csv_path) if len(args) > 2: check_same_length(args[0], args[1])", "and/or adds to log''' #Always log file with open(log_file, mode) as f: f.write(msg)", "from os.path import join as pj from os.path import exists import collections import", "want to overwrite? (y)\") if input().lower() != 'y': print(\"File not modified\") sys.exit() ###", "'/'.join(csv_path.split('/')[0:-1]) + '/' + csv_path.split('/')[-1][0:-4] + today_date + time_f + '.csv' my_df.to_csv(full_output_csv_path, index=False)", "confirm_with_transcript = True, verbose = False): \"\"\" Confirm_with_transcript requires: file path in first", "csv files. args: - Columns for the csv (matched to the names) kwargs:", "locate_single_txt(src_dir, obj_format = 'txt'): \"\"\" Input: - Folder path Output: - Path of", "path: output_path for the csv \"\"\" defaultKwargs = { 'time_format': True, 'txt_flag': False", "adds to log''' #Always log file with open(log_file, mode) as f: f.write(msg) #If", "index_col=False) if single_col: GT_list = transcript_data[0].tolist() else: GT_list = transcript_data[1].tolist() return GT_list def", "csv_flag=False, single_col=False): transcript_path = locate_single_txt(folder_path) if csv_flag == False: check_ending_format(transcript_path, 'txt') transcript_data =", "+ csv_path.split('/')[-1][0:-4] + today_date + time_f + '.txt' my_df.to_csv(full_output_csv_path, header=False, sep='\\t', index=False) else:", "log''' #Always log file with open(log_file, mode) as f: f.write(msg) #If {both} is", "'txt'): \"\"\" Input: - Folder path Output: - Path of the only transcript", "my repos \"\"\" import glob import os import sys import shutil import re", "# Compare 2 lists, full path or only names if collections.Counter(all_audios) == collections.Counter(transcript_lists):", "# Obtain list of paths from transcript transcript_lists = sorted(get_pathsList_from_transcript(transcript_path)) # Compare 2", "Obtain list of paths from transcript transcript_lists = sorted(get_pathsList_from_transcript(transcript_path)) # Compare 2 lists,", "check_same_length(args[0], args[1]) elif len(args) > 3: check_same_length(args[1], args[2]) idx = 0 for current_list", "# # def locate_single_txt(src_dir, obj_format = 'txt'): \"\"\" Input: - Folder path Output:", "repos \"\"\" import glob import os import sys import shutil import re import", "sys import shutil import re import numpy as np import pandas as pd", "well if both: print(msg, end='') def write_my_csv(*args, **kwargs): 
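
# Illustrative sketch only (not part of the original module): how the guard
# helpers above are typically combined before writing results. The names
# 'hyp_list', 'ref_list' and 'out.csv' are hypothetical.
#
#   check_same_length(hyp_list, ref_list)        # abort if the columns differ in length
#   check_csv_exists('out.csv')                  # ask before overwriting an existing file
#   check_ending_format('transcript.txt', 'txt') # abort if the extension is wrong
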
\"\"\" Function to write csv", "# my_df = pd.DataFrame(index=False) my_df = pd.DataFrame() csv_path = kwargs['path'] columns_values = kwargs['cols']", "Return true if folder is NOT empty \"\"\" if (len(os.listdir(current_folder_path)) != 0): return", "idx = 0 for current_list in args: my_df[columns_values[idx]] = current_list idx = idx", "get_list_of_GT(folder_path, csv_flag=False, single_col=False): transcript_path = locate_single_txt(folder_path) if csv_flag == False: check_ending_format(transcript_path, 'txt') transcript_data", "csv (matched to the names) kwargs: - cols: List of names for columns", "path in first column To-do: check if in csv file the header_none will", "transcript_data[1].tolist() return GT_list def get_list_of_audios(folder_path, audio_extension = 'wav', confirm_with_transcript = True, verbose =", "= pd.DataFrame() csv_path = kwargs['path'] columns_values = kwargs['cols'] # check if csv file", "mode) as f: f.write(msg) #If {both} is true, print to terminal as well", "'txt') transcript_data = pd.read_csv(transcript_path, sep=\"\\t\", header=None, index_col=False) if single_col: GT_list = transcript_data[0].tolist() else:", "header as the first row. To-do: Add a function to only compare the", "sys.exit() def check_same_length(list1, list2): if len(list1) != len(list2): print(\"Error, your list1 and list2", "len(all_texts) != 1: print(\"Too many transcripts! Please use only 1\") else: input_transcript_path =", "will throw count the header as the first row. To-do: Add a function", "the filename, not the entire path \"\"\" all_audios = sorted(glob.glob(\"{}/*.{}\".format(folder_path, audio_extension))) ### To", "csv_path = kwargs['path'] columns_values = kwargs['cols'] # check if csv file exists check_csv_exists(csv_path)", "idx + 1 today_date = '_' + str(datetime.date.today()) datetime_object = datetime.datetime.now() time_f =", "= '/'.join(csv_path.split('/')[0:-1]) + '/' + csv_path.split('/')[-1][0:-4] + today_date + time_f + '.csv' my_df.to_csv(full_output_csv_path,", "os.path import join as pj from os.path import exists import collections import datetime", "f: f.write(msg) #If {both} is true, print to terminal as well if both:", "0 for current_list in args: my_df[columns_values[idx]] = current_list idx = idx + 1", "log file with open(log_file, mode) as f: f.write(msg) #If {both} is true, print", "print(\"CSV file already exists, do you want to overwrite? (y)\") if input().lower() !=", "from UNIX-based systems This should be syncronized for all of my repos \"\"\"", "that prints and/or adds to log''' #Always log file with open(log_file, mode) as", "in txt (or csv), it returns a list of the paths of all", "# def log_message(msg, log_file, mode, both=True): '''Function that prints and/or adds to log'''", "to terminal as well if both: print(msg, end='') def write_my_csv(*args, **kwargs): \"\"\" Function", "len(list1) != len(list2): print(\"Error, your list1 and list2 have different lengths\") sys.exit() def", "True else: print(\"Folder {} is empty\".format(current_folder_path)) return False def check_ending_format(current_file_path, ending_format_substring): if current_file_path[-3:]", "# # # Store TXT and CSV # # # def log_message(msg, log_file,", "(matched to the names) kwargs: - cols: List of names for columns (matched", "file the header_none will throw count the header as the first row. 
To-do:", "= '/'.join(csv_path.split('/')[0:-1]) + '/' + csv_path.split('/')[-1][0:-4] + '.csv' my_df.to_csv(full_output_csv_path, index=False) ### # #", "both=True): '''Function that prints and/or adds to log''' #Always log file with open(log_file,", "today_date + time_f + '.csv' my_df.to_csv(full_output_csv_path, index=False) else: if kwargs['txt_flag']: full_output_csv_path = '/'.join(csv_path.split('/')[0:-1])", "kwargs['txt_flag']: full_output_csv_path = '/'.join(csv_path.split('/')[0:-1]) + '/' + csv_path.split('/')[-1][0:-4] + '.txt' my_df.to_csv(full_output_csv_path, header=False, sep='\\t',", "kwargs = { **defaultKwargs, **kwargs } # my_df = pd.DataFrame(index=False) my_df = pd.DataFrame()", "Please use only 1\") else: input_transcript_path = all_texts[0] print(input_transcript_path) print(\"\\n\") return input_transcript_path def", "not modified\") sys.exit() ### # # # Store TXT and CSV # #", "header=False, sep='\\t', index=False) else: full_output_csv_path = '/'.join(csv_path.split('/')[0:-1]) + '/' + csv_path.split('/')[-1][0:-4] + today_date", "collections.Counter(all_audios) == collections.Counter(transcript_lists): if verbose: print(\"Transcript and audios are consistent\") else: print(\"Transcript paths", "count the header as the first row. To-do: Add a function to only", "- Path of the only transcript found in folder \"\"\" all_texts = glob.glob(\"{}/*.{}\".format(src_dir,", "Path of the only transcript found in folder \"\"\" all_texts = glob.glob(\"{}/*.{}\".format(src_dir, obj_format))", "my_df.to_csv(full_output_csv_path, header=False, sep='\\t', index=False) else: full_output_csv_path = '/'.join(csv_path.split('/')[0:-1]) + '/' + csv_path.split('/')[-1][0:-4] +", "full_output_csv_path = '/'.join(csv_path.split('/')[0:-1]) + '/' + csv_path.split('/')[-1][0:-4] + today_date + time_f + '.csv'", "you want to overwrite? 
(y)\") if input().lower() != 'y': print(\"File not modified\") sys.exit()", "if len(list1) != len(list2): print(\"Error, your list1 and list2 have different lengths\") sys.exit()", "GT_list def get_list_of_audios(folder_path, audio_extension = 'wav', confirm_with_transcript = True, verbose = False): \"\"\"", "only 1\") else: input_transcript_path = all_texts[0] print(input_transcript_path) print(\"\\n\") return input_transcript_path def get_pathsList_from_transcript(transcript_path, csv_flag=False):", "it returns a list of the paths of all the audios \"\"\" if", "False: check_ending_format(transcript_path, 'txt') transcript_data = pd.read_csv(transcript_path, sep=\"\\t\", header=None, index_col=False) path_list = transcript_data[0].tolist() return", "only transcript found in folder \"\"\" all_texts = glob.glob(\"{}/*.{}\".format(src_dir, obj_format)) if len(all_texts) !=", "import shutil import re import numpy as np import pandas as pd from", "else: if kwargs['txt_flag']: full_output_csv_path = '/'.join(csv_path.split('/')[0:-1]) + '/' + csv_path.split('/')[-1][0:-4] + '.txt' my_df.to_csv(full_output_csv_path,", "transcript_data[0].tolist() else: GT_list = transcript_data[1].tolist() return GT_list def get_list_of_audios(folder_path, audio_extension = 'wav', confirm_with_transcript", "= all_texts[0] print(input_transcript_path) print(\"\\n\") return input_transcript_path def get_pathsList_from_transcript(transcript_path, csv_flag=False): \"\"\" Given a transcript", "mode, both=True): '''Function that prints and/or adds to log''' #Always log file with", "+ '/' + csv_path.split('/')[-1][0:-4] + '.txt' my_df.to_csv(full_output_csv_path, header=False, sep='\\t', index=False) else: full_output_csv_path =", "the entire path \"\"\" all_audios = sorted(glob.glob(\"{}/*.{}\".format(folder_path, audio_extension))) ### To double check with", "- path: output_path for the csv \"\"\" defaultKwargs = { 'time_format': True, 'txt_flag':", "returns a list of the paths of all the audios \"\"\" if csv_flag", "files. 
args: - Columns for the csv (matched to the names) kwargs: -", "Add a function to only compare the filename, not the entire path \"\"\"", "\"\"\" Utilities functions to check and find files from UNIX-based systems This should", "elif len(args) > 3: check_same_length(args[1], args[2]) idx = 0 for current_list in args:", "# # # Files processes # # # def locate_single_txt(src_dir, obj_format = 'txt'):", "Check processes # # # def check_empty_folder(current_folder_path): \"\"\" Return true if folder is", "and find files from UNIX-based systems This should be syncronized for all of", "csv_flag=False): \"\"\" Given a transcript in txt (or csv), it returns a list", "transcript_data[0].tolist() return path_list def get_list_of_GT(folder_path, csv_flag=False, single_col=False): transcript_path = locate_single_txt(folder_path) if csv_flag ==", "if (len(os.listdir(current_folder_path)) != 0): return True else: print(\"Folder {} is empty\".format(current_folder_path)) return False", "csv_flag == False: check_ending_format(transcript_path, 'txt') transcript_data = pd.read_csv(transcript_path, sep=\"\\t\", header=None, index_col=False) if single_col:", "List of names for columns (matched to args) - path: output_path for the", "confirm_with_transcript: transcript_path = locate_single_txt(folder_path) # Obtain list of paths from transcript transcript_lists =", "{}!\".format(ending_format_substring)) sys.exit() def check_same_length(list1, list2): if len(list1) != len(list2): print(\"Error, your list1 and", "args[2]) idx = 0 for current_list in args: my_df[columns_values[idx]] = current_list idx =", "format, must end with {}!\".format(ending_format_substring)) sys.exit() def check_same_length(list1, list2): if len(list1) != len(list2):", "\"\"\" if csv_flag == False: check_ending_format(transcript_path, 'txt') transcript_data = pd.read_csv(transcript_path, sep=\"\\t\", header=None, index_col=False)", "(y)\") if input().lower() != 'y': print(\"File not modified\") sys.exit() ### # # #", "the audios \"\"\" if csv_flag == False: check_ending_format(transcript_path, 'txt') transcript_data = pd.read_csv(transcript_path, sep=\"\\t\",", "full_output_csv_path = '/'.join(csv_path.split('/')[0:-1]) + '/' + csv_path.split('/')[-1][0:-4] + '.csv' my_df.to_csv(full_output_csv_path, index=False) ### #", "entire path \"\"\" all_audios = sorted(glob.glob(\"{}/*.{}\".format(folder_path, audio_extension))) ### To double check with transcript", "True, 'txt_flag': False } kwargs = { **defaultKwargs, **kwargs } # my_df =", "if kwargs['txt_flag']: full_output_csv_path = '/'.join(csv_path.split('/')[0:-1]) + '/' + csv_path.split('/')[-1][0:-4] + today_date + time_f", "datetime_object = datetime.datetime.now() time_f = \"-{:d}_{:02d}\".format(datetime_object.hour, datetime_object.minute) if kwargs['time_format']: if kwargs['txt_flag']: full_output_csv_path =", "transcripts! 
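
# Example usage of write_my_csv (illustrative only; the column data and the
# 'results.csv' path are hypothetical). With time_format=True the current date
# and time are appended to the stem, e.g. results_2024-01-31-14_05.csv; with
# txt_flag=True a header-less, tab-separated .txt is written instead.
#
#   write_my_csv(names_list, wer_list,
#                cols=['audio_path', 'wer'],
#                path='results.csv',
#                time_format=True,
#                txt_flag=False)
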
### # # # Files processes # # #

def locate_single_txt(src_dir, obj_format='txt'):
    """
    Input:
        - Folder path
    Output:
        - Path of the only transcript found in the folder
    """
    all_texts = glob.glob("{}/*.{}".format(src_dir, obj_format))
    if len(all_texts) != 1:
        # Covers zero as well as multiple matches; the original only warned
        # "Too many transcripts!" and then crashed on an unbound variable
        print("Expected exactly 1 {} file in {}, found {}. Please use only 1".format(
            obj_format, src_dir, len(all_texts)))
        sys.exit()
    input_transcript_path = all_texts[0]
    print(input_transcript_path)
    print("\n")
    return input_transcript_path


def get_pathsList_from_transcript(transcript_path, csv_flag=False):
    """
    Given a transcript in txt (or csv), it returns a list of the paths of all the audios
    """
    if csv_flag == False:
        check_ending_format(transcript_path, 'txt')
        transcript_data = pd.read_csv(transcript_path, sep="\t", header=None, index_col=False)
    else:
        # csv branch (assumption: comma-separated, no header); the original left
        # 'transcript_data' undefined here, which raised a NameError
        transcript_data = pd.read_csv(transcript_path, header=None, index_col=False)
    path_list = transcript_data[0].tolist()
    return path_list


def get_list_of_GT(folder_path, csv_flag=False, single_col=False):
    transcript_path = locate_single_txt(folder_path)
    if csv_flag == False:
        check_ending_format(transcript_path, 'txt')
        transcript_data = pd.read_csv(transcript_path, sep="\t", header=None, index_col=False)
    else:
        # csv branch (assumption: comma-separated, no header), mirroring get_pathsList_from_transcript
        transcript_data = pd.read_csv(transcript_path, header=None, index_col=False)
    if single_col:
        GT_list = transcript_data[0].tolist()
    else:
        GT_list = transcript_data[1].tolist()
    return GT_list
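
# Assumed transcript layout (inferred from the readers above, not stated in the
# original file): one tab-separated row per utterance, with column 0 holding
# the audio path and column 1 holding the ground-truth text, e.g.
#
#   recordings/utt_001.wav<TAB>hello world
#   recordings/utt_002.wav<TAB>good morning
#
# get_pathsList_from_transcript() returns column 0; get_list_of_GT() returns
# column 1 (or column 0 when single_col=True).
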
def get_list_of_audios(folder_path, audio_extension='wav', confirm_with_transcript=True, verbose=False):
    """
    confirm_with_transcript requires the audio file path in the first column of the transcript.

    To-do: check whether header=None on a csv file makes the header row be read as the first data row.
    To-do: add an option to compare only the file names, not the entire paths.
    """
    all_audios = sorted(glob.glob("{}/*.{}".format(folder_path, audio_extension)))

    ### Double-check the audio files against the transcript paths
    if confirm_with_transcript:
        transcript_path = locate_single_txt(folder_path)
        # Obtain the list of paths from the transcript
        transcript_lists = sorted(get_pathsList_from_transcript(transcript_path))
        # Compare the two lists (full paths, or only names once the To-do above is done)
        if collections.Counter(all_audios) == collections.Counter(transcript_lists):
            if verbose:
                print("Transcript and audios are consistent")
        else:
            print("Transcript paths and audio file names do not match")
            sys.exit()
    return all_audios
import keras
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.datasets import mnist

x_train = None
y_train = None
x_test = None
y_test = None


def init():
    global x_train, y_train, x_test, y_test
    (x_train_tmp, y_train_tmp), (x_test_tmp, y_test_tmp) = mnist.load_data()
    x_train = x_train_tmp.reshape(-1, 784)
    x_test = x_test_tmp.reshape(-1, 784)
    train_size = x_train.shape[0]
    test_size = x_test.shape[0]
    y_train = np.zeros((train_size, 10))
    for i in range(train_size):
        y_train[i][y_train_tmp[i]] = 1
    y_test = np.zeros((test_size, 10))
    for i in range(test_size):
        y_test[i][y_test_tmp[i]] = 1
    pass


if __name__ == '__main__':
    import time
    init()
    model = Sequential()
    model.add(Dense(units=1000, activation='sigmoid', input_dim=784))
    model.add(Dense(units=500, activation='sigmoid'))
    model.add(Dense(units=10, activation='softmax'))
    model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
    start_time = time.time()
    model.fit(x_train, y_train, epochs=10, batch_size=1000)
    loss_and_metrics = model.evaluate(x_test, y_test, batch_size=1000)
    print(loss_and_metrics)
    print('Total Time: ', (time.time() - start_time))
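# Equivalence sketch (assumption: the installed Keras version exposes keras.utils.to_categorical):
# the two one-hot encoding loops in init() above can be replaced by a single helper call.
import keras

def one_hot_labels(labels, num_classes=10):
    # Returns the same (N, num_classes) 0/1 matrix that the manual np.zeros loops build.
    return keras.utils.to_categorical(labels, num_classes=num_classes)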
import logging
from pathlib import Path

from scrapy import Spider, Request
from scrapy.crawler import CrawlerProcess
from scrapy_playwright.page import PageCoroutine


class HandleTimeoutMiddleware:
    def process_exception(self, request, exception, spider):
        logging.info("Caught exception: %s", exception.__class__)
        return Request(
            url="https://httpbin.org/get",
            meta={
                "playwright": True,
                "playwright_page_coroutines": [
                    PageCoroutine(
                        "screenshot", path=Path(__file__).parent / "recovered.png", full_page=True
                    ),
                ],
            },
        )


class HandleExceptionSpider(Spider):
    """
    Handle exceptions in the Playwright downloader, such as TimeoutError
    """

    name = "awesome"
    custom_settings = {
        "PLAYWRIGHT_DEFAULT_NAVIGATION_TIMEOUT": 1000,
        "DOWNLOADER_MIDDLEWARES": {
            HandleTimeoutMiddleware: 100,
        },
    }

    def start_requests(self):
        yield Request(
            url="https://httpbin.org/delay/300",
            meta={"playwright": True},
        )

    def parse(self, response):
        yield {"url": response.url}


if __name__ == "__main__":
    process = CrawlerProcess(
        settings={
            "TWISTED_REACTOR": "twisted.internet.asyncioreactor.AsyncioSelectorReactor",
            "DOWNLOAD_HANDLERS": {
                "https": "scrapy_playwright.handler.ScrapyPlaywrightDownloadHandler",
                # "http": "scrapy_playwright.handler.ScrapyPlaywrightDownloadHandler",
            },
            "RETRY_TIMES": 0,
        }
    )
    process.crawl(HandleExceptionSpider)
    process.start()
from machine import Pin, PWM

# Initialization
pwmFan = PWM(Pin(21), duty=0)
reverseFan = Pin(22, Pin.OUT)

# Turn Fan forward 70% speed
reverseFan.value(0)
pwmFan.duty(70)

# Decrease speed
pwmFan.duty(50)

# Decrease speed further (it might stop)
pwmFan.duty(30)

# Turn Fan backwards 70% speed
reverseFan.value(1)
pwmFan.duty(30)

# Decrease speed
pwmFan.duty(50)

# Decrease speed further (it might stop)
pwmFan.duty(70)

# Clean up
reverseFan(0)
pwmFan.deinit()
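# Note (assumption): on ESP32/ESP8266 MicroPython ports, PWM.duty() expects a value in
# 0-1023, so duty(70) above would be roughly 7% of full scale rather than 70%; on a port
# where duty is 0-100 the comments match the values literally. A percent-based helper
# (illustrative only, not part of the original script) could look like:
def set_fan_percent(pwm, percent):
    # Map 0-100% onto the 0-1023 duty range used by PWM.duty().
    pwm.duty(percent * 1023 // 100)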
import sys

read = sys.stdin.buffer.read
readline = sys.stdin.buffer.readline
readlines = sys.stdin.buffer.readlines
sys.setrecursionlimit(10 ** 7)

n = int(readline())
a, b = map(int, readline().split())
p = list(map(int, readline().split()))
memo = [0, 0, 0]
for check in p:
    if check <= a:
        memo[0] += 1
    elif a < check <= b:
        memo[1] += 1
    else:
        memo[2] += 1
print(min(memo))
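# Worked example (sample values chosen for illustration, not taken from the original input):
# with n=5, a=2, b=4 and p=[1, 2, 3, 4, 5], the counts become memo = [2, 2, 1]
# (values <= a, values in (a, b], values > b), so the script prints min(memo) = 1.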
"""
MIT License
Copyright (c) 2020 <NAME>
"""
from flask import Blueprint

user_bp = Blueprint("user", __name__)

from . import views
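# Usage sketch (the factory function and the "myapp.user" import path are assumptions for
# illustration; they are not part of the blueprint module above):
from flask import Flask

def create_app():
    app = Flask(__name__)
    from myapp.user import user_bp  # hypothetical package path for the blueprint above
    app.register_blueprint(user_bp, url_prefix="/user")
    return app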
<gh_stars>10-100
import os
from cave.cavefacade import CAVE


def analyze_all():
    for dirpath, dirnames, filenames in os.walk('../opt_results/'):
        if not dirnames:
            print(dirpath)
            cave = CAVE(folders=[dirpath],
                        output_dir=os.path.join("../CAVE_reports", dirpath[15:]),  # output for debug/images/etc
                        ta_exec_dir=["."],  # Only important for SMAC-results
                        file_format='BOHB' if os.path.exists(os.path.join(dirpath, 'configs.json')) else 'SMAC3',
                        #verbose_level='DEV_DEBUG',
                        verbose_level='OFF',
                        show_jupyter=False,
                        )
            cave.analyze()


if __name__ == '__main__':
    analyze_all()
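# Note: dirpath[15:] strips the leading '../opt_results/' prefix (exactly 15 characters),
# so each report is written under ../CAVE_reports/ mirroring the result folder's layout.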
while True:
    numero = int(input('Digite um número: '))
    i = 0
    while i <= 10:
        print(f'{numero} x {i} = {numero * i}')
        i += 1
    resposta = str(input('Deseja continuar[S/N]: ')).strip().upper()[0]
    if resposta == 'N':
        break
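# The prompts above are Portuguese: 'Digite um número' = 'Enter a number',
# 'Deseja continuar[S/N]' = 'Do you want to continue [Y/N]' (S = Sim/yes, N = Não/no).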
#!/usr/bin/env python3
import asyncio
import unittest

import sixtynine


class TestSixtynine(unittest.TestCase):
    def setUp(self):
        self.loop = asyncio.get_event_loop()

    def tearDown(self):
        self.loop.close()

    def test_mouthful(self):
        self.assertEqual(self.loop.run_until_complete(sixtynine.mouthful()), 69)
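# For context, a minimal coroutine that would satisfy the test above could look like the
# sketch below; the real sixtynine module is not shown here, so this body is an assumption.
# async def mouthful():
#     return 69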
<gh_stars>0
import os

from tornado import web

from settings import *
from handlers import handlers

theme_path = os.path.join(os.path.dirname(__file__), 'themes', theme_name)


class Application(web.Application):
    def __init__(self):
        settings = dict(
            static_path = os.path.join(theme_path, 'static'),
            template_path = os.path.join(theme_path, 'template'),
            debug = True,
            title = title,
            email = email,
            login_url = '/onepeice',
            cookie_secret = '<KEY>'
        )
        super(Application, self).__init__(handlers, **settings)
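# Run sketch (the port number and the entry point are assumptions; the original file's
# main block is not recoverable from this excerpt):
import tornado.ioloop

if __name__ == '__main__':
    Application().listen(8888)
    tornado.ioloop.IOLoop.current().start()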
<reponame>CoffeeCodeRpt/python-crash-course<filename>number_reader.py
from fileinput import filename
import json

filename = 'chapter_10/numbers.json'
with open(filename) as f_object:
    numbers = json.load(f_object)
print(numbers)
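# Note: `from fileinput import filename` above is immediately shadowed by the string
# assignment to `filename`, so it has no effect; `import json` is the only import the
# script actually needs.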