code
stringlengths
2k
1.04M
repo_path
stringlengths
5
517
parsed_code
stringlengths
0
1.04M
quality_prob
float64
0.02
0.95
learning_prob
float64
0.02
0.93
# --- Day 8: Matchsticks --- # Space on the sleigh is limited this year, and so Santa will be bringing his list as a digital copy. He needs to know how much space it will take up when stored. # It is common in many programming languages to provide a way to escape special characters in strings. For example, C, JavaScript, Perl, Python, and even PHP handle special characters in very similar ways. # However, it is important to realize the difference between the number of characters in the code representation of the string literal and the number of characters in the in-memory string itself. # For example: # "" is 2 characters of code (the two double quotes), but the string contains zero characters. # "abc" is 5 characters of code, but 3 characters in the string data. # "aaa\"aaa" is 10 characters of code, but the string itself contains six "a" characters and a single, escaped quote character, for a total of 7 characters in the string data. # "\x27" is 6 characters of code, but the string itself contains just one - an apostrophe ('), escaped using hexadecimal notation. # Santa's list is a file that contains many double-quoted string literals, one on each line. The only escape sequences used are \\ (which represents a single backslash), \" (which represents a lone double-quote character), and \x plus two hexadecimal characters (which represents a single character with that ASCII code). # Disregarding the whitespace in the file, what is the number of characters of code for string literals minus the number of characters in memory for the values of the strings in total for the entire file? # For example, given the four strings above, the total number of characters of string code (2 + 5 + 10 + 6 = 23) minus the total number of characters in memory for string values (0 + 3 + 7 + 1 = 11) is 23 - 11 = 12. 
import time, math, sys startTime = time.perf_counter() # time in seconds (float) debug = False timing = True unitTesting = False stringsList = [] def readInput(inputTextFileName): global stringsList with open("2015/day8/"+inputTextFileName,"r", encoding='utf-8') as file: stringsList = file.readlines() # remove newlines for i in range(0, len(stringsList)): stringsList[i] = stringsList[i].rstrip() def processStrings(): global stringsList codeCharacters = 0 memoryCharacters = 0 for stringItem in stringsList: codeCharacters += len(stringItem) memoryCharacters += (len(eval(stringItem))) if debug: print(stringItem,len(stringItem),(len(eval(stringItem)))) if debug: print("codeCharacters:",codeCharacters,"memoryCharacters:",memoryCharacters) return (codeCharacters, memoryCharacters) if unitTesting: print("Unit Testing") readInput("unit-test-input.txt") else: # read the input text file into a variable readInput("input.txt") (codeCharacters, memoryCharacters) = processStrings() answer = codeCharacters - memoryCharacters if unitTesting: testPass = False if debug: print("answer:", answer) testPass = (answer == 12) print("testPass:", testPass) else: print(answer) # this answer for my input is 1371 endTime = time.perf_counter() # time in seconds (float) if timing: print("Execution took ", endTime - startTime, " seconds.")
2015/day8/2015-day8-part1.py
# --- Day 8: Matchsticks --- # Space on the sleigh is limited this year, and so Santa will be bringing his list as a digital copy. He needs to know how much space it will take up when stored. # It is common in many programming languages to provide a way to escape special characters in strings. For example, C, JavaScript, Perl, Python, and even PHP handle special characters in very similar ways. # However, it is important to realize the difference between the number of characters in the code representation of the string literal and the number of characters in the in-memory string itself. # For example: # "" is 2 characters of code (the two double quotes), but the string contains zero characters. # "abc" is 5 characters of code, but 3 characters in the string data. # "aaa\"aaa" is 10 characters of code, but the string itself contains six "a" characters and a single, escaped quote character, for a total of 7 characters in the string data. # "\x27" is 6 characters of code, but the string itself contains just one - an apostrophe ('), escaped using hexadecimal notation. # Santa's list is a file that contains many double-quoted string literals, one on each line. The only escape sequences used are \\ (which represents a single backslash), \" (which represents a lone double-quote character), and \x plus two hexadecimal characters (which represents a single character with that ASCII code). # Disregarding the whitespace in the file, what is the number of characters of code for string literals minus the number of characters in memory for the values of the strings in total for the entire file? # For example, given the four strings above, the total number of characters of string code (2 + 5 + 10 + 6 = 23) minus the total number of characters in memory for string values (0 + 3 + 7 + 1 = 11) is 23 - 11 = 12. 
import time, math, sys startTime = time.perf_counter() # time in seconds (float) debug = False timing = True unitTesting = False stringsList = [] def readInput(inputTextFileName): global stringsList with open("2015/day8/"+inputTextFileName,"r", encoding='utf-8') as file: stringsList = file.readlines() # remove newlines for i in range(0, len(stringsList)): stringsList[i] = stringsList[i].rstrip() def processStrings(): global stringsList codeCharacters = 0 memoryCharacters = 0 for stringItem in stringsList: codeCharacters += len(stringItem) memoryCharacters += (len(eval(stringItem))) if debug: print(stringItem,len(stringItem),(len(eval(stringItem)))) if debug: print("codeCharacters:",codeCharacters,"memoryCharacters:",memoryCharacters) return (codeCharacters, memoryCharacters) if unitTesting: print("Unit Testing") readInput("unit-test-input.txt") else: # read the input text file into a variable readInput("input.txt") (codeCharacters, memoryCharacters) = processStrings() answer = codeCharacters - memoryCharacters if unitTesting: testPass = False if debug: print("answer:", answer) testPass = (answer == 12) print("testPass:", testPass) else: print(answer) # this answer for my input is 1371 endTime = time.perf_counter() # time in seconds (float) if timing: print("Execution took ", endTime - startTime, " seconds.")
0.535827
0.757279
# %% from warnings import warn from .. import SanUnit from ._decay import Decay from ..utils.loading import load_data, data_path __all__ = ('Toilet',) data_path += 'sanunit_data/_toilet.tsv' # %% class Toilet(SanUnit, Decay, isabstract=True): ''' Abstract class containing common parameters and design algorithms for toilets based on Trimmer et al. [1]_ Parameters ---------- N_user : float Number of people that share this toilet. N_toilet : float Number of parallel toilets. if_toilet_paper : bool If toilet paper is used. if_flushing : bool If water is used for flushing. if_cleansing : bool If water is used for cleansing. if_desiccant : bool If desiccant is used for moisture and odor control. if_air_emission : bool If emission to air occurs (i.e., if the pit is completely sealed off from the atmosphere). if_ideal_emptying : bool If the toilet appropriately emptied to avoid contamination to the environmental. CAPEX : float Capital cost of a single toilet. OPEX_over_CAPEX : float Fraction of annual operating cost over total capital cost. References ---------- .. [1] Trimmer et al., Navigating Multidimensional Social–Ecological System Trade-Offs across Sanitation Alternatives in an Urban Informal Settlement. Environ. Sci. Technol. 2020, 54 (19), 12641–12653. https://doi.org/10.1021/acs.est.0c03296. 
See Also -------- :ref:`qsdsan.sanunits.Decay <sanunits_Decay>` ''' def __init__(self, ID='', ins=None, outs=(), N_user=1, N_toilet=1, if_toilet_paper=True, if_flushing=True, if_cleansing=False, if_desiccant=False, if_air_emission=True, if_ideal_emptying=True, CAPEX=None, OPEX_over_CAPEX=None): SanUnit.__init__(self, ID, ins, outs) self._N_user = 1 self._N_toilet = 1 self.N_user = N_user self.N_toilet = N_toilet self.if_toilet_paper = if_toilet_paper self.if_flushing = if_flushing self.if_cleansing = if_cleansing self.if_desiccant = if_desiccant self.if_air_emission = if_air_emission self.if_ideal_emptying = if_ideal_emptying self.CAPEX = CAPEX self.OPEX_over_CAPEX = OPEX_over_CAPEX data = load_data(path=data_path) for para in data.index: value = float(data.loc[para]['expected']) if para in ('desiccant_V', 'desiccant_rho'): setattr(self, para, value) else: setattr(self, '_'+para, value) del data self._empty_ratio = 0.59 _N_ins = 6 _outs_size_is_fixed = False def _run(self): ur, fec, tp, fw, cw, des = self.ins tp.imass['Tissue'] = int(self.if_toilet_paper)*self.toilet_paper fw.imass['H2O'] = int(self.if_flushing)*self.flushing_water cw.imass['H2O'] = int(self.if_cleansing)*self.cleansing_water des.imass['WoodAsh'] = int(self.if_desiccant)*self.desiccant density_dct = { 'Sand': 1442, 'Gravel': 1600, 'Brick': 1750, 'Plastic': 0.63, 'Steel': 7900, 'StainlessSteelSheet': 2.64 } _BM = {'Single toilet': 1, 'Total toilets': 1} def _cost(self): self.purchase_costs['Single toilet'] = self.CAPEX self.purchase_costs['Total toilets'] = self.CAPEX * self.N_toilet add_OPEX = self.purchase_costs['Total toilets']*self.OPEX_over_CAPEX/365/24 self._add_OPEX = {'Additional OPEX': add_OPEX} @staticmethod def get_emptying_emission(waste, CH4, N2O, empty_ratio, CH4_factor, N2O_factor): ''' Calculate emissions due to non-ideal emptying based on Trimmer et al. [1]_ Parameters ---------- stream : WasteStream Excreta stream that is not appropriately empited (before emptying). 
CH4 : WasteStream Fugitive CH4 gas (before emptying). N2O : WasteStream Fugitive N2O gas (before emptying). empty_ratio : float Fraction of excreta that is appropriately emptied.. CH4_factor : float Factor to convert COD removal to CH4 emission. N2O_factor : float Factor to convert COD removal to N2O emission. Returns ------- stream : WasteStream Excreta stream that is not appropriately empited (after emptying). CH4 : WasteStream Fugitive CH4 gas (after emptying). N2O : WasteStream Fugitive N2O gas (after emptying). ''' COD_rmd = waste.COD*(1-empty_ratio)/1e3*waste.F_vol CH4.imass['CH4'] += COD_rmd * CH4_factor N2O.imass['N2O'] += COD_rmd * N2O_factor waste.mass *= empty_ratio return waste, CH4, N2O @property def N_user(self): '''[float] Number of people that use the toilet per hour.''' return self._N_user @N_user.setter def N_user(self, i): self._N_user = float(i) @property def N_toilet(self): '''[float] Number of parallel toilets.''' return self._N_toilet @N_toilet.setter def N_toilet(self, i): self._N_toilet = float(i) @property def toilet_paper(self): ''' [float] Amount of toilet paper used (if `if_toilet_paper` is True), [kg/cap/hr]. ''' return self._toilet_paper @toilet_paper.setter def toilet_paper(self, i): self._toilet_paper = float(i) @property def flushing_water(self): ''' [float] Amount of water used for flushing (if `if_flushing_water` is True), [kg/cap/hr]. ''' return self._flushing_water @flushing_water.setter def flushing_water(self, i): self._flushing_water = float(i) @property def cleansing_water(self): ''' [float] Amount of water used for cleansing (if `if_cleansing_water` is True), [kg/cap/hr]. ''' return self._cleansing_water @cleansing_water.setter def cleansing_water(self, i): self._cleansing_water = float(i) @property def desiccant(self): ''' [float] Amount of desiccant used (if `if_desiccant` is True), [kg/cap/hr]. .. note:: Value set by `desiccant_V` and `desiccant_rho`. 
''' return self.desiccant_V*self.desiccant_rho @property def N_volatilization(self): ''' [float] Fraction of input N that volatizes to the air (if `if_air_emission` is True). ''' return self._N_volatilization @N_volatilization.setter def N_volatilization(self, i): self._N_volatilization = float(i) @property def empty_ratio(self): ''' [float] Fraction of excreta that is appropriately emptied. .. note:: Will be 1 (i.e., 100%) if `if_ideal_emptying` is True. ''' if self.if_ideal_emptying: return 1. return self._empty_ratio @empty_ratio.setter def empty_ratio(self, i): if self.if_ideal_emptying: msg = f'`if_ideal_emptying` is True, the set value {i} is ignored.' warn(msg, source=self) self._empty_ratio = float(i) @property def MCF_aq(self): '''[float] Methane correction factor for COD lost due to inappropriate emptying.''' return self._MCF_aq @MCF_aq.setter def MCF_aq(self, i): self._MCF_aq = float(i) @property def N2O_EF_aq(self): '''[float] Fraction of N emitted as N2O due to inappropriate emptying.''' return self._N2O_EF_aq @N2O_EF_aq.setter def N2O_EF_aq(self, i): self._N2O_EF_aq = float(i)
qsdsan/sanunits/_toilet.py
# %% from warnings import warn from .. import SanUnit from ._decay import Decay from ..utils.loading import load_data, data_path __all__ = ('Toilet',) data_path += 'sanunit_data/_toilet.tsv' # %% class Toilet(SanUnit, Decay, isabstract=True): ''' Abstract class containing common parameters and design algorithms for toilets based on Trimmer et al. [1]_ Parameters ---------- N_user : float Number of people that share this toilet. N_toilet : float Number of parallel toilets. if_toilet_paper : bool If toilet paper is used. if_flushing : bool If water is used for flushing. if_cleansing : bool If water is used for cleansing. if_desiccant : bool If desiccant is used for moisture and odor control. if_air_emission : bool If emission to air occurs (i.e., if the pit is completely sealed off from the atmosphere). if_ideal_emptying : bool If the toilet appropriately emptied to avoid contamination to the environmental. CAPEX : float Capital cost of a single toilet. OPEX_over_CAPEX : float Fraction of annual operating cost over total capital cost. References ---------- .. [1] Trimmer et al., Navigating Multidimensional Social–Ecological System Trade-Offs across Sanitation Alternatives in an Urban Informal Settlement. Environ. Sci. Technol. 2020, 54 (19), 12641–12653. https://doi.org/10.1021/acs.est.0c03296. 
See Also -------- :ref:`qsdsan.sanunits.Decay <sanunits_Decay>` ''' def __init__(self, ID='', ins=None, outs=(), N_user=1, N_toilet=1, if_toilet_paper=True, if_flushing=True, if_cleansing=False, if_desiccant=False, if_air_emission=True, if_ideal_emptying=True, CAPEX=None, OPEX_over_CAPEX=None): SanUnit.__init__(self, ID, ins, outs) self._N_user = 1 self._N_toilet = 1 self.N_user = N_user self.N_toilet = N_toilet self.if_toilet_paper = if_toilet_paper self.if_flushing = if_flushing self.if_cleansing = if_cleansing self.if_desiccant = if_desiccant self.if_air_emission = if_air_emission self.if_ideal_emptying = if_ideal_emptying self.CAPEX = CAPEX self.OPEX_over_CAPEX = OPEX_over_CAPEX data = load_data(path=data_path) for para in data.index: value = float(data.loc[para]['expected']) if para in ('desiccant_V', 'desiccant_rho'): setattr(self, para, value) else: setattr(self, '_'+para, value) del data self._empty_ratio = 0.59 _N_ins = 6 _outs_size_is_fixed = False def _run(self): ur, fec, tp, fw, cw, des = self.ins tp.imass['Tissue'] = int(self.if_toilet_paper)*self.toilet_paper fw.imass['H2O'] = int(self.if_flushing)*self.flushing_water cw.imass['H2O'] = int(self.if_cleansing)*self.cleansing_water des.imass['WoodAsh'] = int(self.if_desiccant)*self.desiccant density_dct = { 'Sand': 1442, 'Gravel': 1600, 'Brick': 1750, 'Plastic': 0.63, 'Steel': 7900, 'StainlessSteelSheet': 2.64 } _BM = {'Single toilet': 1, 'Total toilets': 1} def _cost(self): self.purchase_costs['Single toilet'] = self.CAPEX self.purchase_costs['Total toilets'] = self.CAPEX * self.N_toilet add_OPEX = self.purchase_costs['Total toilets']*self.OPEX_over_CAPEX/365/24 self._add_OPEX = {'Additional OPEX': add_OPEX} @staticmethod def get_emptying_emission(waste, CH4, N2O, empty_ratio, CH4_factor, N2O_factor): ''' Calculate emissions due to non-ideal emptying based on Trimmer et al. [1]_ Parameters ---------- stream : WasteStream Excreta stream that is not appropriately empited (before emptying). 
CH4 : WasteStream Fugitive CH4 gas (before emptying). N2O : WasteStream Fugitive N2O gas (before emptying). empty_ratio : float Fraction of excreta that is appropriately emptied.. CH4_factor : float Factor to convert COD removal to CH4 emission. N2O_factor : float Factor to convert COD removal to N2O emission. Returns ------- stream : WasteStream Excreta stream that is not appropriately empited (after emptying). CH4 : WasteStream Fugitive CH4 gas (after emptying). N2O : WasteStream Fugitive N2O gas (after emptying). ''' COD_rmd = waste.COD*(1-empty_ratio)/1e3*waste.F_vol CH4.imass['CH4'] += COD_rmd * CH4_factor N2O.imass['N2O'] += COD_rmd * N2O_factor waste.mass *= empty_ratio return waste, CH4, N2O @property def N_user(self): '''[float] Number of people that use the toilet per hour.''' return self._N_user @N_user.setter def N_user(self, i): self._N_user = float(i) @property def N_toilet(self): '''[float] Number of parallel toilets.''' return self._N_toilet @N_toilet.setter def N_toilet(self, i): self._N_toilet = float(i) @property def toilet_paper(self): ''' [float] Amount of toilet paper used (if `if_toilet_paper` is True), [kg/cap/hr]. ''' return self._toilet_paper @toilet_paper.setter def toilet_paper(self, i): self._toilet_paper = float(i) @property def flushing_water(self): ''' [float] Amount of water used for flushing (if `if_flushing_water` is True), [kg/cap/hr]. ''' return self._flushing_water @flushing_water.setter def flushing_water(self, i): self._flushing_water = float(i) @property def cleansing_water(self): ''' [float] Amount of water used for cleansing (if `if_cleansing_water` is True), [kg/cap/hr]. ''' return self._cleansing_water @cleansing_water.setter def cleansing_water(self, i): self._cleansing_water = float(i) @property def desiccant(self): ''' [float] Amount of desiccant used (if `if_desiccant` is True), [kg/cap/hr]. .. note:: Value set by `desiccant_V` and `desiccant_rho`. 
''' return self.desiccant_V*self.desiccant_rho @property def N_volatilization(self): ''' [float] Fraction of input N that volatizes to the air (if `if_air_emission` is True). ''' return self._N_volatilization @N_volatilization.setter def N_volatilization(self, i): self._N_volatilization = float(i) @property def empty_ratio(self): ''' [float] Fraction of excreta that is appropriately emptied. .. note:: Will be 1 (i.e., 100%) if `if_ideal_emptying` is True. ''' if self.if_ideal_emptying: return 1. return self._empty_ratio @empty_ratio.setter def empty_ratio(self, i): if self.if_ideal_emptying: msg = f'`if_ideal_emptying` is True, the set value {i} is ignored.' warn(msg, source=self) self._empty_ratio = float(i) @property def MCF_aq(self): '''[float] Methane correction factor for COD lost due to inappropriate emptying.''' return self._MCF_aq @MCF_aq.setter def MCF_aq(self, i): self._MCF_aq = float(i) @property def N2O_EF_aq(self): '''[float] Fraction of N emitted as N2O due to inappropriate emptying.''' return self._N2O_EF_aq @N2O_EF_aq.setter def N2O_EF_aq(self, i): self._N2O_EF_aq = float(i)
0.658198
0.521654
import json import ahocorasick import pytest from detect_secrets.core.potential_secret import PotentialSecret from detect_secrets.plugins.keyword import KeywordDetector from testing.mocks import mock_file_object FOLLOWED_BY_COLON_EQUAL_SIGNS_RE = { 'negatives': { 'quotes_required': [ 'theapikey := ""', # Nothing in the quotes 'theapikey := "somefakekey"', # 'fake' in the secret ], 'quotes_not_required': [ 'theapikeyforfoo := hopenobodyfindsthisone', # Characters between apikey and := ], }, 'positives': { 'quotes_required': [ 'apikey := "m{{h}o)p${e]nob(ody[finds>-_$#thisone}}"', 'apikey :="m{{h}o)p${e]nob(ody[finds>-_$#thisone}}"', 'apikey := "m{{h}o)p${e]nob(ody[finds>-_$#thisone}}"', "apikey := 'm{{h}o)p${e]nob(ody[finds>-_$#thisone}}'", "apikey :='m{{h}o)p${e]nob(ody[finds>-_$#thisone}}'", 'apikey:= "m{{h}o)p${e]nob(ody[finds>-_$#thisone}}"', 'apikey:="m{{h}o)p${e]nob(ody[finds>-_$#thisone}}"', "apikey:= 'm{{h}o)p${e]nob(ody[finds>-_$#thisone}}'", "apikey:='m{{h}o)p${e]nob(ody[finds>-_$#thisone}}'", "apikey:= 'm{{h}o)p${e]nob(ody[finds>-_$#thisone}}'", ], 'quotes_not_required': [ 'apikey := m{{h}o)p${e]nob(ody[finds>-_$#thisone}}', 'apikey :=m{{h}o)p${e]nob(ody[finds>-_$#thisone}}', 'apikey:= m{{h}o)p${e]nob(ody[finds>-_$#thisone}}', 'apikey:=m{{h}o)p${e]nob(ody[finds>-_$#thisone}}', 'api-key:=m{{h}o)p${e]nob(ody[finds>-_$#thisone}}', ], }, } FOLLOWED_BY_COLON_RE = { 'negatives': { 'quotes_required': [ 'theapikey: ""', # Nothing in the quotes 'theapikey: "somefakekey"', # 'fake' in the secret ], 'quotes_not_required': [ 'theapikeyforfoo:hopenobodyfindsthisone', # Characters between apikey and : 'password: ${link}', # Has a ${ followed by a } ], }, 'positives': { 'quotes_required': [ "'theapikey': 'm{{h}o)p${e]nob(ody[finds>-_$#thisone}}'", '"theapikey": "m{{h}o)p${e]nob(ody[finds>-_$#thisone}}"', 'apikey: "m{{h}o)p${e]nob(ody[finds>-_$#thisone}}"', "apikey: 'm{{h}o)p${e]nob(ody[finds>-_$#thisone}}'", ], 'quotes_not_required': [ 'apikey: 
m{{h}o)p${e]nob(ody[finds>-_$#thisone}}', 'apikey:m{{h}o)p${e]nob(ody[finds>-_$#thisone}}', 'theapikey:m{{h}o)p${e]nob(ody[finds>-_$#thisone}}', ], }, } FOLLOWED_BY_EQUAL_SIGNS_OPTIONAL_BRACKETS_OPTIONAL_AT_SIGN_QUOTES_REQUIRED_REGEX = { 'negatives': { 'quotes_required': [ 'theapikey[] = ""', # Nothing in the quotes 'theapikey = @"somefakekey"', # 'fake' in the secret ], }, 'positives': { 'quotes_required': [ 'apikey = "m{{h}o)p${e]nob(ody[finds>-_$#thisone}}"', 'apikey ="m{{h}o)p${e]nob(ody[finds>-_$#thisone}}"', 'apikey = "m{{h}o)p${e]nob(ody[finds>-_$#thisone}}"', 'apikey = @"m{{h}o)p${e]nob(ody[finds>-_$#thisone}}"', 'apikey =@"m{{h}o)p${e]nob(ody[finds>-_$#thisone}}"', 'apikey = @"m{{h}o)p${e]nob(ody[finds>-_$#thisone}}"', 'apikey[]= "m{{h}o)p${e]nob(ody[finds>-_$#thisone}}"', 'apikey[]="m{{h}o)p${e]nob(ody[finds>-_$#thisone}}"', ], }, } FOLLOWED_BY_EQUAL_SIGNS_RE = { 'negatives': { 'quotes_required': [ 'some_key = "real_secret"', # We cannot make 'key' a Keyword, too noisy 'my_password = ""', # Nothing in the quotes "my_password = ''", # Nothing in the quotes 'my_password = "<PASSWORD>"', # 'fake' in the secret 'open(self, password = ""):', # secrets is ""): 'open(self, password = ""):', # secrets is ""): ], 'quotes_not_required': [ 'my_password = <PASSWORD>(<PASSWORD>', # Has a ( followed by a ) "my_password = request.json_body['hey']", # Has a [ followed by a ] 'my_password = True', # 'True' is a known false-positive 'login(username=username, password=password)', # secret is password) ], }, 'positives': { 'quotes_required': [ 'some_dict["secret"] = "m{{h}o)p${e]nob(ody[finds>-_$#thisone}}"', 'the_password= "m{{h}o)p${e]nob(ody[finds>-_$#thisone}}"\n', 'the_password=\'m{{h}o)p${e]nob(ody[finds>-_$#thisone}}\'\n', ], 'quotes_not_required': [ "some_dict['secret'] = m{{h}o)p${e]nob(ody[finds>-_$#thisone}}", 'my_password=m{{h}o)p${e]nob(ody[finds>-_$#thisone}}', 'my_password= m{{h}o)p${e]nob(ody[finds>-_$#thisone}}', 'my_password 
=m{{h}o)p${e]nob(ody[finds>-_$#thisone}}', 'my_password = m{{h}o)p${e]nob(ody[finds>-_$#thisone}}', 'my_password =m{{h}o)p${e]nob(ody[finds>-_$#thisone}}', 'the_password=m{{h}o)p${e]nob(ody[finds>-_$#thisone}}\n', ], }, } FOLLOWED_BY_QUOTES_AND_SEMICOLON_RE = { 'negatives': { 'quotes_required': [ 'private_key "";', # Nothing in the quotes 'private_key \'"no spaces\';', # Has whitespace in the secret 'private_key "fake";', # 'fake' in the secret 'private_key "some/dir/aint/a/secret";', # 3 or more / 'private_key "${FOO}";', # Starts with ${ and ends with } 'private_key "hopenobodyfindsthisone\';', # Double-quote does not match single-quote 'private_key \'hopenobodyfindsthisone";', # Single-quote does not match double-quote ], }, 'positives': { 'quotes_required': [ 'apikey "m{{h}o)p${e]nob(ody[finds>-_$#thisone}}";', # Double-quotes 'fooapikeyfoo "m{{h}o)p${e]nob(ody[finds>-_$#thisone}}";', # Double-quotes 'fooapikeyfoo"m{{h}o)p${e]nob(ody[finds>-_$#thisone}}";', # Double-quotes 'private_key \'m{{h}o)p${e]nob(ody[finds>-_$#thisone}}\';', # Single-quotes 'private-key \'m{{h}o)p${e]nob(ody[finds>-_$#thisone}}\';', # Single-quotes 'fooprivate_keyfoo\'m{{h}o)p${e]nob(ody[finds>-_$#thisone}}\';', # Single-quotes 'fooprivate_key\'m{{h}o)p${e]nob(ody[finds>-_$#thisone}}\';', # Single-quotes ], }, } QUOTES_REQUIRED_FILE_EXTENSIONS = ( '.cls', '.java', '.js', '.py', '.swift', ) STANDARD_NEGATIVES = [] STANDARD_POSITIVES = [] STANDARD_NEGATIVES.extend( FOLLOWED_BY_COLON_EQUAL_SIGNS_RE.get('negatives').get('quotes_required') + FOLLOWED_BY_COLON_EQUAL_SIGNS_RE.get('negatives').get('quotes_not_required') + FOLLOWED_BY_COLON_RE.get('negatives').get('quotes_required') + FOLLOWED_BY_COLON_RE.get('negatives').get('quotes_not_required') + FOLLOWED_BY_EQUAL_SIGNS_RE.get('negatives').get('quotes_required') + FOLLOWED_BY_EQUAL_SIGNS_RE.get('negatives').get('quotes_not_required') + FOLLOWED_BY_QUOTES_AND_SEMICOLON_RE.get('negatives').get('quotes_required') + 
FOLLOWED_BY_EQUAL_SIGNS_OPTIONAL_BRACKETS_OPTIONAL_AT_SIGN_QUOTES_REQUIRED_REGEX.get( 'negatives', ).get('quotes_required'), ) STANDARD_POSITIVES.extend( FOLLOWED_BY_COLON_RE.get('positives').get('quotes_required') + FOLLOWED_BY_COLON_RE.get('positives').get('quotes_not_required') + FOLLOWED_BY_EQUAL_SIGNS_RE.get('positives').get('quotes_required') + FOLLOWED_BY_EQUAL_SIGNS_RE.get('positives').get('quotes_not_required') + FOLLOWED_BY_QUOTES_AND_SEMICOLON_RE.get('positives').get('quotes_required'), ) class TestKeywordDetector: @pytest.mark.parametrize( 'file_content', STANDARD_POSITIVES, ) def test_analyze_standard_positives(self, file_content): logic = KeywordDetector() f = mock_file_object(file_content) output = logic.analyze(f, 'mock_filename') assert len(output) == 1 for potential_secret in output: assert 'mock_filename' == potential_secret.filename assert ( potential_secret.secret_hash == PotentialSecret.hash_secret('m{{h}o)p${e]nob(ody[finds>-_$#thisone}}') ) @pytest.mark.parametrize( 'file_content', STANDARD_POSITIVES, ) def test_analyze_standard_positives_with_automaton(self, file_content): automaton = ahocorasick.Automaton() word = 'thisone' automaton.add_word(word, word) automaton.make_automaton() logic = KeywordDetector(automaton=automaton) f = mock_file_object(file_content) output = logic.analyze(f, 'mock_filename') # All skipped due to automaton assert len(output) == 0 @pytest.mark.parametrize( 'file_content', STANDARD_POSITIVES, ) def test_analyze_with_line_exclude(self, file_content): logic = KeywordDetector(keyword_exclude='thisone') f = mock_file_object(file_content) output = logic.analyze(f, 'mock_filename.foo') assert len(output) == 0 @pytest.mark.parametrize( 'file_content, file_extension', ( (positive, file_extension) for positive in ( FOLLOWED_BY_COLON_RE.get('positives').get('quotes_required') + FOLLOWED_BY_EQUAL_SIGNS_RE.get('positives').get('quotes_required') + FOLLOWED_BY_QUOTES_AND_SEMICOLON_RE.get('positives').get('quotes_required') ) for 
file_extension in QUOTES_REQUIRED_FILE_EXTENSIONS ), ) def test_analyze_quotes_required_positives(self, file_content, file_extension): logic = KeywordDetector() f = mock_file_object(file_content) mock_filename = 'mock_filename{}'.format(file_extension) output = logic.analyze(f, mock_filename) assert len(output) == 1 for potential_secret in output: assert mock_filename == potential_secret.filename assert ( potential_secret.secret_hash == PotentialSecret.hash_secret('m{{h}o)p${e]nob(ody[finds>-_$#thisone}}') ) @pytest.mark.parametrize( 'file_content', FOLLOWED_BY_EQUAL_SIGNS_RE.get('positives').get('quotes_required') + FOLLOWED_BY_EQUAL_SIGNS_RE.get('positives').get('quotes_not_required') + FOLLOWED_BY_QUOTES_AND_SEMICOLON_RE.get('positives').get('quotes_required') + FOLLOWED_BY_COLON_EQUAL_SIGNS_RE.get('positives').get('quotes_required') + FOLLOWED_BY_COLON_EQUAL_SIGNS_RE.get('positives').get('quotes_not_required'), ) def test_analyze_go_positives(self, file_content): logic = KeywordDetector() f = mock_file_object(file_content) output = logic.analyze(f, 'mock_filename.go') assert len(output) == 1 for potential_secret in output: assert 'mock_filename.go' == potential_secret.filename assert ( potential_secret.secret_hash == PotentialSecret.hash_secret('m{{h}o)p${e]nob(ody[finds>-_$#thisone}}') ) @pytest.mark.parametrize( 'file_content', FOLLOWED_BY_EQUAL_SIGNS_OPTIONAL_BRACKETS_OPTIONAL_AT_SIGN_QUOTES_REQUIRED_REGEX.get( 'positives', ).get('quotes_required'), ) def test_analyze_objective_c_positives(self, file_content): logic = KeywordDetector() f = mock_file_object(file_content) output = logic.analyze(f, 'mock_filename.m') assert len(output) == 1 for potential_secret in output: assert 'mock_filename.m' == potential_secret.filename assert ( potential_secret.secret_hash == PotentialSecret.hash_secret('m{{h}o)p${e]nob(ody[finds>-_$#thisone}}') ) @pytest.mark.parametrize( 'file_content', STANDARD_NEGATIVES, ) def test_analyze_standard_negatives(self, file_content): logic 
= KeywordDetector() f = mock_file_object(file_content) output = logic.analyze(f, 'mock_filename.foo') assert len(output) == 0 @pytest.mark.parametrize( 'file_content', STANDARD_NEGATIVES + [ # FOLLOWED_BY_COLON_RE 'apiKey: this.apiKey,', "apiKey: fs.readFileSync('foo',", ], ) def test_analyze_javascript_negatives(self, file_content): logic = KeywordDetector() f = mock_file_object(file_content) output = logic.analyze(f, 'mock_filename.js') assert len(output) == 0 @pytest.mark.parametrize( 'file_content', STANDARD_NEGATIVES + [ # FOLLOWED_BY_EQUAL_SIGNS_RE '$password = <PASSWORD>;', ], ) def test_analyze_php_negatives(self, file_content): logic = KeywordDetector() f = mock_file_object(file_content) output = logic.analyze(f, 'mock_filename.php') assert len(output) == 0 @pytest.mark.parametrize( 'file_content, file_extension', ( (negative, file_extension) for negative in ( STANDARD_NEGATIVES + [ # FOLLOWED_BY_COLON_QUOTES_REQUIRED_RE 'apikey: hope]nobody[finds>-_$#thisone', 'apikey:hope]nobody[finds>-_$#thisone', 'theapikey:hope]nobody[finds>-_$#thisone', # FOLLOWED_BY_EQUAL_SIGNS_QUOTES_REQUIRED_RE "some_dict['secret'] = hope]nobody[finds>-_$#thisone", 'my_password=hope]nobody[finds>-_$#thisone', 'my_password= hope]nobody[finds>-_$#thisone', 'my_password =hope]nobody[finds>-_$#thisone', 'my_password = hope]nobody[finds>-_$#thisone', 'my_password =hope]nobody[finds>-_$#thisone', 'the_password=hope]nobody[finds>-_$#this<PASSWORD>\n', ] ) for file_extension in QUOTES_REQUIRED_FILE_EXTENSIONS ), ) def test_analyze_quotes_required_negatives(self, file_content, file_extension): logic = KeywordDetector() f = mock_file_object(file_content) output = logic.analyze( f, 'mock_filename{}'.format(file_extension), ) assert len(output) == 0 @pytest.mark.parametrize( 'file_content, file_extension', ( (standard_positive, file_extension) for standard_positive in STANDARD_POSITIVES for file_extension in ( '.yaml', '.yml', ) ), ) def test_analyze_yaml_negatives(self, file_content, 
file_extension): logic = KeywordDetector() # Make it start with `{{`, (and end with `}}`) so it hits our false-positive check f = mock_file_object(file_content.replace('m{', '{')) output = logic.analyze( f, 'mock_filename{}'.format(file_extension), ) assert len(output) == 0 @pytest.mark.parametrize( 'file_content', STANDARD_POSITIVES, ) def test_analyze_example_negatives(self, file_content): logic = KeywordDetector() # Make it start with `<`, (and end with `>`) so it hits our false-positive check f = mock_file_object( file_content.replace('m{', '<').replace('}', '>'), ) output = logic.analyze( f, 'mock_filename.example', ) assert len(output) == 0 @pytest.mark.parametrize( 'keyword_exclude, dict_content', ( ( None, {'keyword_exclude': None, 'name': 'KeywordDetector'}, ), ( 'keyword', {'keyword_exclude': 'keyword', 'name': 'KeywordDetector'}, ), ( 'a.*|b.*', {'keyword_exclude': 'a.*|b.*', 'name': 'KeywordDetector'}, ), ), ) def test_dict_output(self, keyword_exclude, dict_content): detector = KeywordDetector(keyword_exclude) actual = json.dumps( detector.__dict__, sort_keys=True, ) expected = json.dumps( dict_content, sort_keys=True, ) assert actual == expected
tests/plugins/keyword_test.py
import json import ahocorasick import pytest from detect_secrets.core.potential_secret import PotentialSecret from detect_secrets.plugins.keyword import KeywordDetector from testing.mocks import mock_file_object FOLLOWED_BY_COLON_EQUAL_SIGNS_RE = { 'negatives': { 'quotes_required': [ 'theapikey := ""', # Nothing in the quotes 'theapikey := "somefakekey"', # 'fake' in the secret ], 'quotes_not_required': [ 'theapikeyforfoo := hopenobodyfindsthisone', # Characters between apikey and := ], }, 'positives': { 'quotes_required': [ 'apikey := "m{{h}o)p${e]nob(ody[finds>-_$#thisone}}"', 'apikey :="m{{h}o)p${e]nob(ody[finds>-_$#thisone}}"', 'apikey := "m{{h}o)p${e]nob(ody[finds>-_$#thisone}}"', "apikey := 'm{{h}o)p${e]nob(ody[finds>-_$#thisone}}'", "apikey :='m{{h}o)p${e]nob(ody[finds>-_$#thisone}}'", 'apikey:= "m{{h}o)p${e]nob(ody[finds>-_$#thisone}}"', 'apikey:="m{{h}o)p${e]nob(ody[finds>-_$#thisone}}"', "apikey:= 'm{{h}o)p${e]nob(ody[finds>-_$#thisone}}'", "apikey:='m{{h}o)p${e]nob(ody[finds>-_$#thisone}}'", "apikey:= 'm{{h}o)p${e]nob(ody[finds>-_$#thisone}}'", ], 'quotes_not_required': [ 'apikey := m{{h}o)p${e]nob(ody[finds>-_$#thisone}}', 'apikey :=m{{h}o)p${e]nob(ody[finds>-_$#thisone}}', 'apikey:= m{{h}o)p${e]nob(ody[finds>-_$#thisone}}', 'apikey:=m{{h}o)p${e]nob(ody[finds>-_$#thisone}}', 'api-key:=m{{h}o)p${e]nob(ody[finds>-_$#thisone}}', ], }, } FOLLOWED_BY_COLON_RE = { 'negatives': { 'quotes_required': [ 'theapikey: ""', # Nothing in the quotes 'theapikey: "somefakekey"', # 'fake' in the secret ], 'quotes_not_required': [ 'theapikeyforfoo:hopenobodyfindsthisone', # Characters between apikey and : 'password: ${link}', # Has a ${ followed by a } ], }, 'positives': { 'quotes_required': [ "'theapikey': 'm{{h}o)p${e]nob(ody[finds>-_$#thisone}}'", '"theapikey": "m{{h}o)p${e]nob(ody[finds>-_$#thisone}}"', 'apikey: "m{{h}o)p${e]nob(ody[finds>-_$#thisone}}"', "apikey: 'm{{h}o)p${e]nob(ody[finds>-_$#thisone}}'", ], 'quotes_not_required': [ 'apikey: 
m{{h}o)p${e]nob(ody[finds>-_$#thisone}}', 'apikey:m{{h}o)p${e]nob(ody[finds>-_$#thisone}}', 'theapikey:m{{h}o)p${e]nob(ody[finds>-_$#thisone}}', ], }, } FOLLOWED_BY_EQUAL_SIGNS_OPTIONAL_BRACKETS_OPTIONAL_AT_SIGN_QUOTES_REQUIRED_REGEX = { 'negatives': { 'quotes_required': [ 'theapikey[] = ""', # Nothing in the quotes 'theapikey = @"somefakekey"', # 'fake' in the secret ], }, 'positives': { 'quotes_required': [ 'apikey = "m{{h}o)p${e]nob(ody[finds>-_$#thisone}}"', 'apikey ="m{{h}o)p${e]nob(ody[finds>-_$#thisone}}"', 'apikey = "m{{h}o)p${e]nob(ody[finds>-_$#thisone}}"', 'apikey = @"m{{h}o)p${e]nob(ody[finds>-_$#thisone}}"', 'apikey =@"m{{h}o)p${e]nob(ody[finds>-_$#thisone}}"', 'apikey = @"m{{h}o)p${e]nob(ody[finds>-_$#thisone}}"', 'apikey[]= "m{{h}o)p${e]nob(ody[finds>-_$#thisone}}"', 'apikey[]="m{{h}o)p${e]nob(ody[finds>-_$#thisone}}"', ], }, } FOLLOWED_BY_EQUAL_SIGNS_RE = { 'negatives': { 'quotes_required': [ 'some_key = "real_secret"', # We cannot make 'key' a Keyword, too noisy 'my_password = ""', # Nothing in the quotes "my_password = ''", # Nothing in the quotes 'my_password = "<PASSWORD>"', # 'fake' in the secret 'open(self, password = ""):', # secrets is ""): 'open(self, password = ""):', # secrets is ""): ], 'quotes_not_required': [ 'my_password = <PASSWORD>(<PASSWORD>', # Has a ( followed by a ) "my_password = request.json_body['hey']", # Has a [ followed by a ] 'my_password = True', # 'True' is a known false-positive 'login(username=username, password=password)', # secret is password) ], }, 'positives': { 'quotes_required': [ 'some_dict["secret"] = "m{{h}o)p${e]nob(ody[finds>-_$#thisone}}"', 'the_password= "m{{h}o)p${e]nob(ody[finds>-_$#thisone}}"\n', 'the_password=\'m{{h}o)p${e]nob(ody[finds>-_$#thisone}}\'\n', ], 'quotes_not_required': [ "some_dict['secret'] = m{{h}o)p${e]nob(ody[finds>-_$#thisone}}", 'my_password=m{{h}o)p${e]nob(ody[finds>-_$#thisone}}', 'my_password= m{{h}o)p${e]nob(ody[finds>-_$#thisone}}', 'my_password 
=m{{h}o)p${e]nob(ody[finds>-_$#thisone}}', 'my_password = m{{h}o)p${e]nob(ody[finds>-_$#thisone}}', 'my_password =m{{h}o)p${e]nob(ody[finds>-_$#thisone}}', 'the_password=m{{h}o)p${e]nob(ody[finds>-_$#thisone}}\n', ], }, } FOLLOWED_BY_QUOTES_AND_SEMICOLON_RE = { 'negatives': { 'quotes_required': [ 'private_key "";', # Nothing in the quotes 'private_key \'"no spaces\';', # Has whitespace in the secret 'private_key "fake";', # 'fake' in the secret 'private_key "some/dir/aint/a/secret";', # 3 or more / 'private_key "${FOO}";', # Starts with ${ and ends with } 'private_key "hopenobodyfindsthisone\';', # Double-quote does not match single-quote 'private_key \'hopenobodyfindsthisone";', # Single-quote does not match double-quote ], }, 'positives': { 'quotes_required': [ 'apikey "m{{h}o)p${e]nob(ody[finds>-_$#thisone}}";', # Double-quotes 'fooapikeyfoo "m{{h}o)p${e]nob(ody[finds>-_$#thisone}}";', # Double-quotes 'fooapikeyfoo"m{{h}o)p${e]nob(ody[finds>-_$#thisone}}";', # Double-quotes 'private_key \'m{{h}o)p${e]nob(ody[finds>-_$#thisone}}\';', # Single-quotes 'private-key \'m{{h}o)p${e]nob(ody[finds>-_$#thisone}}\';', # Single-quotes 'fooprivate_keyfoo\'m{{h}o)p${e]nob(ody[finds>-_$#thisone}}\';', # Single-quotes 'fooprivate_key\'m{{h}o)p${e]nob(ody[finds>-_$#thisone}}\';', # Single-quotes ], }, } QUOTES_REQUIRED_FILE_EXTENSIONS = ( '.cls', '.java', '.js', '.py', '.swift', ) STANDARD_NEGATIVES = [] STANDARD_POSITIVES = [] STANDARD_NEGATIVES.extend( FOLLOWED_BY_COLON_EQUAL_SIGNS_RE.get('negatives').get('quotes_required') + FOLLOWED_BY_COLON_EQUAL_SIGNS_RE.get('negatives').get('quotes_not_required') + FOLLOWED_BY_COLON_RE.get('negatives').get('quotes_required') + FOLLOWED_BY_COLON_RE.get('negatives').get('quotes_not_required') + FOLLOWED_BY_EQUAL_SIGNS_RE.get('negatives').get('quotes_required') + FOLLOWED_BY_EQUAL_SIGNS_RE.get('negatives').get('quotes_not_required') + FOLLOWED_BY_QUOTES_AND_SEMICOLON_RE.get('negatives').get('quotes_required') + 
FOLLOWED_BY_EQUAL_SIGNS_OPTIONAL_BRACKETS_OPTIONAL_AT_SIGN_QUOTES_REQUIRED_REGEX.get( 'negatives', ).get('quotes_required'), ) STANDARD_POSITIVES.extend( FOLLOWED_BY_COLON_RE.get('positives').get('quotes_required') + FOLLOWED_BY_COLON_RE.get('positives').get('quotes_not_required') + FOLLOWED_BY_EQUAL_SIGNS_RE.get('positives').get('quotes_required') + FOLLOWED_BY_EQUAL_SIGNS_RE.get('positives').get('quotes_not_required') + FOLLOWED_BY_QUOTES_AND_SEMICOLON_RE.get('positives').get('quotes_required'), ) class TestKeywordDetector: @pytest.mark.parametrize( 'file_content', STANDARD_POSITIVES, ) def test_analyze_standard_positives(self, file_content): logic = KeywordDetector() f = mock_file_object(file_content) output = logic.analyze(f, 'mock_filename') assert len(output) == 1 for potential_secret in output: assert 'mock_filename' == potential_secret.filename assert ( potential_secret.secret_hash == PotentialSecret.hash_secret('m{{h}o)p${e]nob(ody[finds>-_$#thisone}}') ) @pytest.mark.parametrize( 'file_content', STANDARD_POSITIVES, ) def test_analyze_standard_positives_with_automaton(self, file_content): automaton = ahocorasick.Automaton() word = 'thisone' automaton.add_word(word, word) automaton.make_automaton() logic = KeywordDetector(automaton=automaton) f = mock_file_object(file_content) output = logic.analyze(f, 'mock_filename') # All skipped due to automaton assert len(output) == 0 @pytest.mark.parametrize( 'file_content', STANDARD_POSITIVES, ) def test_analyze_with_line_exclude(self, file_content): logic = KeywordDetector(keyword_exclude='thisone') f = mock_file_object(file_content) output = logic.analyze(f, 'mock_filename.foo') assert len(output) == 0 @pytest.mark.parametrize( 'file_content, file_extension', ( (positive, file_extension) for positive in ( FOLLOWED_BY_COLON_RE.get('positives').get('quotes_required') + FOLLOWED_BY_EQUAL_SIGNS_RE.get('positives').get('quotes_required') + FOLLOWED_BY_QUOTES_AND_SEMICOLON_RE.get('positives').get('quotes_required') ) for 
file_extension in QUOTES_REQUIRED_FILE_EXTENSIONS ), ) def test_analyze_quotes_required_positives(self, file_content, file_extension): logic = KeywordDetector() f = mock_file_object(file_content) mock_filename = 'mock_filename{}'.format(file_extension) output = logic.analyze(f, mock_filename) assert len(output) == 1 for potential_secret in output: assert mock_filename == potential_secret.filename assert ( potential_secret.secret_hash == PotentialSecret.hash_secret('m{{h}o)p${e]nob(ody[finds>-_$#thisone}}') ) @pytest.mark.parametrize( 'file_content', FOLLOWED_BY_EQUAL_SIGNS_RE.get('positives').get('quotes_required') + FOLLOWED_BY_EQUAL_SIGNS_RE.get('positives').get('quotes_not_required') + FOLLOWED_BY_QUOTES_AND_SEMICOLON_RE.get('positives').get('quotes_required') + FOLLOWED_BY_COLON_EQUAL_SIGNS_RE.get('positives').get('quotes_required') + FOLLOWED_BY_COLON_EQUAL_SIGNS_RE.get('positives').get('quotes_not_required'), ) def test_analyze_go_positives(self, file_content): logic = KeywordDetector() f = mock_file_object(file_content) output = logic.analyze(f, 'mock_filename.go') assert len(output) == 1 for potential_secret in output: assert 'mock_filename.go' == potential_secret.filename assert ( potential_secret.secret_hash == PotentialSecret.hash_secret('m{{h}o)p${e]nob(ody[finds>-_$#thisone}}') ) @pytest.mark.parametrize( 'file_content', FOLLOWED_BY_EQUAL_SIGNS_OPTIONAL_BRACKETS_OPTIONAL_AT_SIGN_QUOTES_REQUIRED_REGEX.get( 'positives', ).get('quotes_required'), ) def test_analyze_objective_c_positives(self, file_content): logic = KeywordDetector() f = mock_file_object(file_content) output = logic.analyze(f, 'mock_filename.m') assert len(output) == 1 for potential_secret in output: assert 'mock_filename.m' == potential_secret.filename assert ( potential_secret.secret_hash == PotentialSecret.hash_secret('m{{h}o)p${e]nob(ody[finds>-_$#thisone}}') ) @pytest.mark.parametrize( 'file_content', STANDARD_NEGATIVES, ) def test_analyze_standard_negatives(self, file_content): logic 
= KeywordDetector() f = mock_file_object(file_content) output = logic.analyze(f, 'mock_filename.foo') assert len(output) == 0 @pytest.mark.parametrize( 'file_content', STANDARD_NEGATIVES + [ # FOLLOWED_BY_COLON_RE 'apiKey: this.apiKey,', "apiKey: fs.readFileSync('foo',", ], ) def test_analyze_javascript_negatives(self, file_content): logic = KeywordDetector() f = mock_file_object(file_content) output = logic.analyze(f, 'mock_filename.js') assert len(output) == 0 @pytest.mark.parametrize( 'file_content', STANDARD_NEGATIVES + [ # FOLLOWED_BY_EQUAL_SIGNS_RE '$password = <PASSWORD>;', ], ) def test_analyze_php_negatives(self, file_content): logic = KeywordDetector() f = mock_file_object(file_content) output = logic.analyze(f, 'mock_filename.php') assert len(output) == 0 @pytest.mark.parametrize( 'file_content, file_extension', ( (negative, file_extension) for negative in ( STANDARD_NEGATIVES + [ # FOLLOWED_BY_COLON_QUOTES_REQUIRED_RE 'apikey: hope]nobody[finds>-_$#thisone', 'apikey:hope]nobody[finds>-_$#thisone', 'theapikey:hope]nobody[finds>-_$#thisone', # FOLLOWED_BY_EQUAL_SIGNS_QUOTES_REQUIRED_RE "some_dict['secret'] = hope]nobody[finds>-_$#thisone", 'my_password=hope]nobody[finds>-_$#thisone', 'my_password= hope]nobody[finds>-_$#thisone', 'my_password =hope]nobody[finds>-_$#thisone', 'my_password = hope]nobody[finds>-_$#thisone', 'my_password =hope]nobody[finds>-_$#thisone', 'the_password=hope]nobody[finds>-_$#this<PASSWORD>\n', ] ) for file_extension in QUOTES_REQUIRED_FILE_EXTENSIONS ), ) def test_analyze_quotes_required_negatives(self, file_content, file_extension): logic = KeywordDetector() f = mock_file_object(file_content) output = logic.analyze( f, 'mock_filename{}'.format(file_extension), ) assert len(output) == 0 @pytest.mark.parametrize( 'file_content, file_extension', ( (standard_positive, file_extension) for standard_positive in STANDARD_POSITIVES for file_extension in ( '.yaml', '.yml', ) ), ) def test_analyze_yaml_negatives(self, file_content, 
file_extension): logic = KeywordDetector() # Make it start with `{{`, (and end with `}}`) so it hits our false-positive check f = mock_file_object(file_content.replace('m{', '{')) output = logic.analyze( f, 'mock_filename{}'.format(file_extension), ) assert len(output) == 0 @pytest.mark.parametrize( 'file_content', STANDARD_POSITIVES, ) def test_analyze_example_negatives(self, file_content): logic = KeywordDetector() # Make it start with `<`, (and end with `>`) so it hits our false-positive check f = mock_file_object( file_content.replace('m{', '<').replace('}', '>'), ) output = logic.analyze( f, 'mock_filename.example', ) assert len(output) == 0 @pytest.mark.parametrize( 'keyword_exclude, dict_content', ( ( None, {'keyword_exclude': None, 'name': 'KeywordDetector'}, ), ( 'keyword', {'keyword_exclude': 'keyword', 'name': 'KeywordDetector'}, ), ( 'a.*|b.*', {'keyword_exclude': 'a.*|b.*', 'name': 'KeywordDetector'}, ), ), ) def test_dict_output(self, keyword_exclude, dict_content): detector = KeywordDetector(keyword_exclude) actual = json.dumps( detector.__dict__, sort_keys=True, ) expected = json.dumps( dict_content, sort_keys=True, ) assert actual == expected
0.216757
0.160069
import argparse import sys import numpy as np import stl def _argparser(): p = argparse.ArgumentParser(description='Tool for cloning objects in STL') p.add_argument('-nx', type=int, default=1, help='Number of clones in X direction') p.add_argument('-ny', type=int, default=1, help='Number of clones in Y direction') p.add_argument('-dx', type=float, default=0, help='Delta in X direction') p.add_argument('-dy', type=float, default=0, help='Delta in Y direction') p.add_argument('-gap', type=float, default=-1, help='Gap between bounding boxes for auto placement (set this or -dx and -dy)') p.add_argument('-o', type=str, help='Output file') p.add_argument('-i', type=str, required=True, help='Input file') p.add_argument('-ascii', action='store_true', help='Ascii output') return p def main(argv): args = _argparser().parse_args(argv[1:]) in_file = args.i if args.o: out_file = args.o else: out_file = in_file + '_out.stl' print('output is going to', out_file) nx,ny,dx,dy = args.nx,args.ny,args.dx,args.dy mesh = stl.Mesh.from_file(in_file) if args.gap>=0: bbox_size = mesh.max_ - mesh.min_ if dx==0: dx = bbox_size[stl.Dimension.X] + args.gap if dy==0: dy = bbox_size[stl.Dimension.Y] + args.gap print('Auto delta:',(dx,dy)) nt = mesh.data.shape[0] # number of triangles print("Original mesh size:", nt) data_repl = np.tile(mesh.data, nx*ny) deltas_x = np.tile(np.arange(nx, dtype=np.float32)*dx, ny) deltas_x = np.repeat(deltas_x, nt*3).reshape((-1,3)) deltas_y = np.repeat(np.arange(ny, dtype=np.float32)*dy, nx) deltas_y = np.repeat(deltas_y, nt*3).reshape((-1,3)) data_repl['vectors'][:, :, stl.Dimension.X] += deltas_x data_repl['vectors'][:, :, stl.Dimension.Y] += deltas_y mesh_repl = stl.Mesh(data_repl) print("Replicated mesh size:", mesh_repl.data.shape[0]) mesh_repl.save(out_file, mode=stl.Mode.ASCII if args.ascii else stl.Mode.BINARY) return 0 if __name__ == "__main__": sys.exit(main(sys.argv))
stlclone.py
import argparse import sys import numpy as np import stl def _argparser(): p = argparse.ArgumentParser(description='Tool for cloning objects in STL') p.add_argument('-nx', type=int, default=1, help='Number of clones in X direction') p.add_argument('-ny', type=int, default=1, help='Number of clones in Y direction') p.add_argument('-dx', type=float, default=0, help='Delta in X direction') p.add_argument('-dy', type=float, default=0, help='Delta in Y direction') p.add_argument('-gap', type=float, default=-1, help='Gap between bounding boxes for auto placement (set this or -dx and -dy)') p.add_argument('-o', type=str, help='Output file') p.add_argument('-i', type=str, required=True, help='Input file') p.add_argument('-ascii', action='store_true', help='Ascii output') return p def main(argv): args = _argparser().parse_args(argv[1:]) in_file = args.i if args.o: out_file = args.o else: out_file = in_file + '_out.stl' print('output is going to', out_file) nx,ny,dx,dy = args.nx,args.ny,args.dx,args.dy mesh = stl.Mesh.from_file(in_file) if args.gap>=0: bbox_size = mesh.max_ - mesh.min_ if dx==0: dx = bbox_size[stl.Dimension.X] + args.gap if dy==0: dy = bbox_size[stl.Dimension.Y] + args.gap print('Auto delta:',(dx,dy)) nt = mesh.data.shape[0] # number of triangles print("Original mesh size:", nt) data_repl = np.tile(mesh.data, nx*ny) deltas_x = np.tile(np.arange(nx, dtype=np.float32)*dx, ny) deltas_x = np.repeat(deltas_x, nt*3).reshape((-1,3)) deltas_y = np.repeat(np.arange(ny, dtype=np.float32)*dy, nx) deltas_y = np.repeat(deltas_y, nt*3).reshape((-1,3)) data_repl['vectors'][:, :, stl.Dimension.X] += deltas_x data_repl['vectors'][:, :, stl.Dimension.Y] += deltas_y mesh_repl = stl.Mesh(data_repl) print("Replicated mesh size:", mesh_repl.data.shape[0]) mesh_repl.save(out_file, mode=stl.Mode.ASCII if args.ascii else stl.Mode.BINARY) return 0 if __name__ == "__main__": sys.exit(main(sys.argv))
0.206734
0.125923
"""Face attribute train.""" import os import time import datetime import mindspore import mindspore.nn as nn from mindspore import context from mindspore import Tensor from mindspore.nn.optim import Momentum from mindspore.communication.management import get_group_size, init, get_rank from mindspore.nn import TrainOneStepCell from mindspore.context import ParallelMode from mindspore.train.callback import ModelCheckpoint, RunContext, CheckpointConfig from mindspore.train.serialization import load_checkpoint, load_param_into_net from mindspore.common import dtype as mstype from src.FaceAttribute.resnet18 import get_resnet18 from src.FaceAttribute.loss_factory import get_loss from src.dataset_train import data_generator from src.lrsche_factory import warmup_step from src.log import get_logger, AverageMeter from model_utils.config import config from model_utils.moxing_adapter import moxing_wrapper from model_utils.device_adapter import get_device_id, get_device_num class InternalCallbackParam(dict): """Internal callback object's parameters.""" def __getattr__(self, _key): return self[_key] def __setattr__(self, _key, _value): self[_key] = _value class BuildTrainNetwork(nn.Cell): '''Build train network.''' def __init__(self, my_network, my_criterion): super(BuildTrainNetwork, self).__init__() self.network = my_network self.criterion = my_criterion def construct(self, input_data, label): logit0, logit1, logit2 = self.network(input_data) loss0 = self.criterion(logit0, logit1, logit2, label) return loss0 def modelarts_pre_process(): '''modelarts pre process function.''' def unzip(zip_file, save_dir): import zipfile s_time = time.time() if not os.path.exists(os.path.join(save_dir, config.modelarts_dataset_unzip_name)): zip_isexist = zipfile.is_zipfile(zip_file) if zip_isexist: fz = zipfile.ZipFile(zip_file, 'r') data_num = len(fz.namelist()) print("Extract Start...") print("unzip file num: {}".format(data_num)) data_print = int(data_num / 100) if data_num > 100 else 1 i = 0 
for file in fz.namelist(): if i % data_print == 0: print("unzip percent: {}%".format(int(i * 100 / data_num)), flush=True) i += 1 fz.extract(file, save_dir) print("cost time: {}min:{}s.".format(int((time.time() - s_time) / 60), int(int(time.time() - s_time) % 60))) print("Extract Done.") else: print("This is not zip.") else: print("Zip has been extracted.") if config.need_modelarts_dataset_unzip: zip_file_1 = os.path.join(config.data_path, config.modelarts_dataset_unzip_name + ".zip") save_dir_1 = os.path.join(config.data_path) sync_lock = "/tmp/unzip_sync.lock" # Each server contains 8 devices as most. if get_device_id() % min(get_device_num(), 8) == 0 and not os.path.exists(sync_lock): print("Zip file path: ", zip_file_1) print("Unzip file save dir: ", save_dir_1) unzip(zip_file_1, save_dir_1) print("===Finish extract data synchronization===") try: os.mknod(sync_lock) except IOError: pass while True: if os.path.exists(sync_lock): break time.sleep(1) print("Device: {}, Finish sync unzip data from {} to {}.".format(get_device_id(), zip_file_1, save_dir_1)) config.ckpt_path = os.path.join(config.output_path, config.ckpt_path) @moxing_wrapper(pre_process=modelarts_pre_process) def run_train(): '''run train.''' context.set_context(mode=context.GRAPH_MODE, device_target=config.device_target, save_graphs=False, device_id=get_device_id()) mindspore.set_seed(1) # init distributed if config.world_size != 1: init() config.local_rank = get_rank() config.world_size = get_group_size() config.lr = config.lr * 4. 
parallel_mode = ParallelMode.DATA_PARALLEL else: config.per_batch_size = 256 parallel_mode = ParallelMode.STAND_ALONE context.reset_auto_parallel_context() context.set_auto_parallel_context(parallel_mode=parallel_mode, gradients_mean=True, device_num=config.world_size) config.outputs_dir = os.path.join(config.ckpt_path, datetime.datetime.now().strftime('%Y-%m-%d_time_%H_%M_%S')) config.logger = get_logger(config.outputs_dir, config.local_rank) loss_meter = AverageMeter('loss') # dataloader config.logger.info('start create dataloader') de_dataloader, steps_per_epoch, num_classes = data_generator(config) config.steps_per_epoch = steps_per_epoch config.num_classes = num_classes config.logger.info('end create dataloader') config.logger.save_args(config) # backbone && loss && load pretrain model config.logger.important_info('start create network') create_network_start = time.time() network = get_resnet18(config) criterion = get_loss() if os.path.isfile(config.pretrained): param_dict = load_checkpoint(config.pretrained) param_dict_new = {} for key, values in param_dict.items(): if key.startswith('moments.'): continue elif key.startswith('network.'): param_dict_new[key[8:]] = values else: param_dict_new[key] = values load_param_into_net(network, param_dict_new) config.logger.info('load model %s success', config.pretrained) # optimizer and lr scheduler lr = warmup_step(config, gamma=0.1) opt = Momentum(params=network.trainable_params(), learning_rate=lr, momentum=config.momentum, weight_decay=config.weight_decay, loss_scale=config.loss_scale) train_net = BuildTrainNetwork(network, criterion) # mixed precision training criterion.add_flags_recursive(fp32=True) train_net = TrainOneStepCell(train_net, opt, sens=config.loss_scale) if config.local_rank == 0: ckpt_max_num = config.max_epoch train_config = CheckpointConfig(save_checkpoint_steps=config.steps_per_epoch, keep_checkpoint_max=ckpt_max_num) ckpt_cb = ModelCheckpoint(config=train_config, directory=config.outputs_dir, 
prefix='{}'.format(config.local_rank)) cb_params = InternalCallbackParam() cb_params.train_network = train_net cb_params.epoch_num = ckpt_max_num cb_params.cur_epoch_num = 0 run_context = RunContext(cb_params) ckpt_cb.begin(run_context) train_net.set_train() t_end = time.time() t_epoch = time.time() old_progress = -1 i = 0 for _, (data, gt_classes) in enumerate(de_dataloader): data_tensor = Tensor(data, dtype=mstype.float32) gt_tensor = Tensor(gt_classes, dtype=mstype.int32) loss = train_net(data_tensor, gt_tensor) loss_meter.update(loss.asnumpy()[0]) if config.local_rank == 0: cb_params.cur_step_num = i + 1 cb_params.batch_num = i + 2 ckpt_cb.step_end(run_context) if (i + 1) % config.steps_per_epoch == 0 and config.local_rank == 0: cb_params.cur_epoch_num += 1 if i == 0: time_for_graph_compile = time.time() - create_network_start config.logger.important_info( '{}, graph compile time={:.2f}s'.format(config.backbone, time_for_graph_compile)) if (i + 1) % config.log_interval == 0 and config.local_rank == 0: time_used = time.time() - t_end epoch = int((i + 1) / config.steps_per_epoch) fps = config.per_batch_size * (i - old_progress) * config.world_size / time_used config.logger.info('epoch[{}], iter[{}], {}, {:.2f} imgs/sec'.format(epoch, i + 1, loss_meter, fps)) t_end = time.time() loss_meter.reset() old_progress = i if (i + 1) % config.steps_per_epoch == 0 and config.local_rank == 0: epoch_time_used = time.time() - t_epoch epoch = int((i + 1) / config.steps_per_epoch) fps = config.per_batch_size * config.world_size * config.steps_per_epoch / epoch_time_used per_step_time = epoch_time_used / config.steps_per_epoch config.logger.info('=================================================') config.logger.info('epoch[{}], iter[{}], {:.2f} imgs/sec'.format(epoch, i + 1, fps)) config.logger.info('epoch[{}], epoch time: {:5.3f} ms, per step time: {:5.3f} ms'.format( epoch, epoch_time_used * 1000, per_step_time * 1000)) 
config.logger.info('=================================================') t_epoch = time.time() i += 1 config.logger.info('--------- trains out ---------') if __name__ == "__main__": run_train()
research/cv/FaceAttribute/train.py
"""Face attribute train.""" import os import time import datetime import mindspore import mindspore.nn as nn from mindspore import context from mindspore import Tensor from mindspore.nn.optim import Momentum from mindspore.communication.management import get_group_size, init, get_rank from mindspore.nn import TrainOneStepCell from mindspore.context import ParallelMode from mindspore.train.callback import ModelCheckpoint, RunContext, CheckpointConfig from mindspore.train.serialization import load_checkpoint, load_param_into_net from mindspore.common import dtype as mstype from src.FaceAttribute.resnet18 import get_resnet18 from src.FaceAttribute.loss_factory import get_loss from src.dataset_train import data_generator from src.lrsche_factory import warmup_step from src.log import get_logger, AverageMeter from model_utils.config import config from model_utils.moxing_adapter import moxing_wrapper from model_utils.device_adapter import get_device_id, get_device_num class InternalCallbackParam(dict): """Internal callback object's parameters.""" def __getattr__(self, _key): return self[_key] def __setattr__(self, _key, _value): self[_key] = _value class BuildTrainNetwork(nn.Cell): '''Build train network.''' def __init__(self, my_network, my_criterion): super(BuildTrainNetwork, self).__init__() self.network = my_network self.criterion = my_criterion def construct(self, input_data, label): logit0, logit1, logit2 = self.network(input_data) loss0 = self.criterion(logit0, logit1, logit2, label) return loss0 def modelarts_pre_process(): '''modelarts pre process function.''' def unzip(zip_file, save_dir): import zipfile s_time = time.time() if not os.path.exists(os.path.join(save_dir, config.modelarts_dataset_unzip_name)): zip_isexist = zipfile.is_zipfile(zip_file) if zip_isexist: fz = zipfile.ZipFile(zip_file, 'r') data_num = len(fz.namelist()) print("Extract Start...") print("unzip file num: {}".format(data_num)) data_print = int(data_num / 100) if data_num > 100 else 1 i = 0 
for file in fz.namelist(): if i % data_print == 0: print("unzip percent: {}%".format(int(i * 100 / data_num)), flush=True) i += 1 fz.extract(file, save_dir) print("cost time: {}min:{}s.".format(int((time.time() - s_time) / 60), int(int(time.time() - s_time) % 60))) print("Extract Done.") else: print("This is not zip.") else: print("Zip has been extracted.") if config.need_modelarts_dataset_unzip: zip_file_1 = os.path.join(config.data_path, config.modelarts_dataset_unzip_name + ".zip") save_dir_1 = os.path.join(config.data_path) sync_lock = "/tmp/unzip_sync.lock" # Each server contains 8 devices as most. if get_device_id() % min(get_device_num(), 8) == 0 and not os.path.exists(sync_lock): print("Zip file path: ", zip_file_1) print("Unzip file save dir: ", save_dir_1) unzip(zip_file_1, save_dir_1) print("===Finish extract data synchronization===") try: os.mknod(sync_lock) except IOError: pass while True: if os.path.exists(sync_lock): break time.sleep(1) print("Device: {}, Finish sync unzip data from {} to {}.".format(get_device_id(), zip_file_1, save_dir_1)) config.ckpt_path = os.path.join(config.output_path, config.ckpt_path) @moxing_wrapper(pre_process=modelarts_pre_process) def run_train(): '''run train.''' context.set_context(mode=context.GRAPH_MODE, device_target=config.device_target, save_graphs=False, device_id=get_device_id()) mindspore.set_seed(1) # init distributed if config.world_size != 1: init() config.local_rank = get_rank() config.world_size = get_group_size() config.lr = config.lr * 4. 
parallel_mode = ParallelMode.DATA_PARALLEL else: config.per_batch_size = 256 parallel_mode = ParallelMode.STAND_ALONE context.reset_auto_parallel_context() context.set_auto_parallel_context(parallel_mode=parallel_mode, gradients_mean=True, device_num=config.world_size) config.outputs_dir = os.path.join(config.ckpt_path, datetime.datetime.now().strftime('%Y-%m-%d_time_%H_%M_%S')) config.logger = get_logger(config.outputs_dir, config.local_rank) loss_meter = AverageMeter('loss') # dataloader config.logger.info('start create dataloader') de_dataloader, steps_per_epoch, num_classes = data_generator(config) config.steps_per_epoch = steps_per_epoch config.num_classes = num_classes config.logger.info('end create dataloader') config.logger.save_args(config) # backbone && loss && load pretrain model config.logger.important_info('start create network') create_network_start = time.time() network = get_resnet18(config) criterion = get_loss() if os.path.isfile(config.pretrained): param_dict = load_checkpoint(config.pretrained) param_dict_new = {} for key, values in param_dict.items(): if key.startswith('moments.'): continue elif key.startswith('network.'): param_dict_new[key[8:]] = values else: param_dict_new[key] = values load_param_into_net(network, param_dict_new) config.logger.info('load model %s success', config.pretrained) # optimizer and lr scheduler lr = warmup_step(config, gamma=0.1) opt = Momentum(params=network.trainable_params(), learning_rate=lr, momentum=config.momentum, weight_decay=config.weight_decay, loss_scale=config.loss_scale) train_net = BuildTrainNetwork(network, criterion) # mixed precision training criterion.add_flags_recursive(fp32=True) train_net = TrainOneStepCell(train_net, opt, sens=config.loss_scale) if config.local_rank == 0: ckpt_max_num = config.max_epoch train_config = CheckpointConfig(save_checkpoint_steps=config.steps_per_epoch, keep_checkpoint_max=ckpt_max_num) ckpt_cb = ModelCheckpoint(config=train_config, directory=config.outputs_dir, 
prefix='{}'.format(config.local_rank)) cb_params = InternalCallbackParam() cb_params.train_network = train_net cb_params.epoch_num = ckpt_max_num cb_params.cur_epoch_num = 0 run_context = RunContext(cb_params) ckpt_cb.begin(run_context) train_net.set_train() t_end = time.time() t_epoch = time.time() old_progress = -1 i = 0 for _, (data, gt_classes) in enumerate(de_dataloader): data_tensor = Tensor(data, dtype=mstype.float32) gt_tensor = Tensor(gt_classes, dtype=mstype.int32) loss = train_net(data_tensor, gt_tensor) loss_meter.update(loss.asnumpy()[0]) if config.local_rank == 0: cb_params.cur_step_num = i + 1 cb_params.batch_num = i + 2 ckpt_cb.step_end(run_context) if (i + 1) % config.steps_per_epoch == 0 and config.local_rank == 0: cb_params.cur_epoch_num += 1 if i == 0: time_for_graph_compile = time.time() - create_network_start config.logger.important_info( '{}, graph compile time={:.2f}s'.format(config.backbone, time_for_graph_compile)) if (i + 1) % config.log_interval == 0 and config.local_rank == 0: time_used = time.time() - t_end epoch = int((i + 1) / config.steps_per_epoch) fps = config.per_batch_size * (i - old_progress) * config.world_size / time_used config.logger.info('epoch[{}], iter[{}], {}, {:.2f} imgs/sec'.format(epoch, i + 1, loss_meter, fps)) t_end = time.time() loss_meter.reset() old_progress = i if (i + 1) % config.steps_per_epoch == 0 and config.local_rank == 0: epoch_time_used = time.time() - t_epoch epoch = int((i + 1) / config.steps_per_epoch) fps = config.per_batch_size * config.world_size * config.steps_per_epoch / epoch_time_used per_step_time = epoch_time_used / config.steps_per_epoch config.logger.info('=================================================') config.logger.info('epoch[{}], iter[{}], {:.2f} imgs/sec'.format(epoch, i + 1, fps)) config.logger.info('epoch[{}], epoch time: {:5.3f} ms, per step time: {:5.3f} ms'.format( epoch, epoch_time_used * 1000, per_step_time * 1000)) 
config.logger.info('=================================================') t_epoch = time.time() i += 1 config.logger.info('--------- trains out ---------') if __name__ == "__main__": run_train()
0.69368
0.120594
import os
import sys
import logging

# Third-party libraries
from elevate import elevate

# Local modules
import subcmd


class Install(subcmd.SubCmd):
    """Install this application into the OS's executable search path.

    On Linux, create a symbolic link in /usr/local/bin.
    On Windows, create a launcher .bat file inside the system32 folder so the
    application can be started from any command prompt.
    """

    # Name of the Windows launcher batch file created in system32.
    WINBAT = 'qsmcli.bat'

    def __init__(self):
        # Sub-command dispatch entry point expected by subcmd.SubCmd.
        self.subs = self.install

    def install(self, arg=None):
        """Dispatch to the platform-specific install routine.

        arg is forwarded unchanged to the platform handler.
        """
        if os.name == 'posix':
            self.install_in_linux(arg)
        else:
            self.install_in_win(arg)

    def install_in_linux(self, arg):
        """Create a symbolic link to this program in /usr/local/bin."""
        LOCAL_BIN = '/usr/local/bin/'
        # Re-run with root privileges; writing to /usr/local/bin needs them.
        # elevate(graphical=False)
        elevate()
        basename = os.path.splitext(os.path.basename(sys.argv[0]))[0]
        print(basename)
        src = os.path.abspath(sys.argv[0])
        dest = LOCAL_BIN + basename
        if os.path.isfile(dest):
            logging.warning("Destination file %s exist, overwriting it.", dest)
            os.unlink(dest)
        logging.critical("Creating symbolic link %s to %s ", dest, src)
        os.symlink(src, dest)

    def install_in_win(self, arg):
        """Create a launcher .bat in the system32 folder that starts this app.

        NOTE: we would like to put the file in the system32 folder directly.
        However, Windows redirects system32 access for 32-bit processes, so
        the 'sysnative' alias is used, which always resolves to the real
        system32 folder.
        """
        sys32dir = os.environ['WINDIR'] + '\\sysnative\\'
        winbat_file = sys32dir + Install.WINBAT
        try:
            # Check if the file pre-exists and whether it can be replaced
            if os.path.isfile(winbat_file):
                print("Destination file %s exist, overwriting it."
                      % winbat_file)
                os.unlink(winbat_file)
            if os.path.isfile(winbat_file):
                print("Error! file can't be deleted")
                return

            # Create the batch file.  BUGFIX: the literal '%*' (batch-file
            # "all arguments" placeholder) must be escaped as '%%*' inside a
            # %-format string; the original '%*' raised
            # ValueError: unsupported format character '*'.
            pgm = os.path.abspath(sys.argv[0])
            with open(winbat_file, 'w') as bat_file:
                print("creating %s" % winbat_file)
                bat_file.write("start \"\" \"%s\" %%*" % pgm)
        except IOError:
            print("Write file error, run program as administrator")
            return
        except Exception as exc:
            # Narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt
            # propagate; log the actual error instead of swallowing it.
            print("Please return error to author for your money back")
            logging.exception("Unexpected error while creating %s: %s",
                              winbat_file, exc)
src/install.py
import os
import sys
import logging

# Third-party libraries
from elevate import elevate

# Local modules
import subcmd


class Install(subcmd.SubCmd):
    """Install this application into the OS's executable search path.

    On Linux, create a symbolic link in /usr/local/bin.
    On Windows, create a launcher .bat file inside the system32 folder so the
    application can be started from any command prompt.
    """

    # Name of the Windows launcher batch file created in system32.
    WINBAT = 'qsmcli.bat'

    def __init__(self):
        # Sub-command dispatch entry point expected by subcmd.SubCmd.
        self.subs = self.install

    def install(self, arg=None):
        """Dispatch to the platform-specific install routine.

        arg is forwarded unchanged to the platform handler.
        """
        if os.name == 'posix':
            self.install_in_linux(arg)
        else:
            self.install_in_win(arg)

    def install_in_linux(self, arg):
        """Create a symbolic link to this program in /usr/local/bin."""
        LOCAL_BIN = '/usr/local/bin/'
        # Re-run with root privileges; writing to /usr/local/bin needs them.
        # elevate(graphical=False)
        elevate()
        basename = os.path.splitext(os.path.basename(sys.argv[0]))[0]
        print(basename)
        src = os.path.abspath(sys.argv[0])
        dest = LOCAL_BIN + basename
        if os.path.isfile(dest):
            logging.warning("Destination file %s exist, overwriting it.", dest)
            os.unlink(dest)
        logging.critical("Creating symbolic link %s to %s ", dest, src)
        os.symlink(src, dest)

    def install_in_win(self, arg):
        """Create a launcher .bat in the system32 folder that starts this app.

        NOTE: we would like to put the file in the system32 folder directly.
        However, Windows redirects system32 access for 32-bit processes, so
        the 'sysnative' alias is used, which always resolves to the real
        system32 folder.
        """
        sys32dir = os.environ['WINDIR'] + '\\sysnative\\'
        winbat_file = sys32dir + Install.WINBAT
        try:
            # Check if the file pre-exists and whether it can be replaced
            if os.path.isfile(winbat_file):
                print("Destination file %s exist, overwriting it."
                      % winbat_file)
                os.unlink(winbat_file)
            if os.path.isfile(winbat_file):
                print("Error! file can't be deleted")
                return

            # Create the batch file.  BUGFIX: the literal '%*' (batch-file
            # "all arguments" placeholder) must be escaped as '%%*' inside a
            # %-format string; the original '%*' raised
            # ValueError: unsupported format character '*'.
            pgm = os.path.abspath(sys.argv[0])
            with open(winbat_file, 'w') as bat_file:
                print("creating %s" % winbat_file)
                bat_file.write("start \"\" \"%s\" %%*" % pgm)
        except IOError:
            print("Write file error, run program as administrator")
            return
        except Exception as exc:
            # Narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt
            # propagate; log the actual error instead of swallowing it.
            print("Please return error to author for your money back")
            logging.exception("Unexpected error while creating %s: %s",
                              winbat_file, exc)
0.220091
0.055413
# %% IMPORTS
# Built-in imports
from inspect import _VAR_KEYWORD, _VAR_POSITIONAL, _empty, isclass, signature
from os import path
import warnings

# Package imports
import e13tools as e13
from mpi4pyd.MPI import get_HybridComm_obj
import numpy as np
from numpy.random import rand
from sortedcontainers import SortedDict as sdict

# PRISM imports
from prism._internal import RequestWarning, check_vals, np_array

# All declaration
__all__ = ['convert_data', 'convert_parameters', 'test_subclass']


# %% UTILITY FUNCTIONS
# This function converts provided model data into format used by PRISM
def convert_data(model_data):
    """
    Converts the provided `model_data` into a full data dict, taking into
    account all formatting options, and returns it.

    This function can be used externally to check how the provided
    `model_data` would be interpreted when provided to the
    :class:`~prism.modellink.ModelLink` subclass. Its output can be used for
    the 'model_data' input argument.

    Parameters
    ----------
    model_data : array_like, dict or str
        Anything that can be converted to a dict that provides model data
        information.

    Returns
    -------
    data_dict : dict
        Dict with the provided `model_data` converted to its full format.

    """

    # If a data file is given
    if isinstance(model_data, str):
        # Obtain absolute path to given file
        data_file = path.abspath(model_data)

        # Read the data file in as a string
        data_points = np.genfromtxt(data_file, dtype=(str), delimiter=':',
                                    autostrip=True)

        # Make sure that data_points is 2D
        data_points = np_array(data_points, ndmin=2)

        # Convert read-in data to dict
        model_data = dict(data_points)

    # If a data dict is given
    elif isinstance(model_data, dict):
        model_data = dict(model_data)

    # If anything else is given
    else:
        # Check if it can be converted to a dict
        try:
            model_data = dict(model_data)
        except Exception:
            # 'from None': the low-level conversion error adds no information
            raise TypeError("Input model data cannot be converted to type "
                            "'dict'!") from None

    # Make empty data_dict
    data_dict = dict()

    # Loop over all items in model_data
    for key, value in model_data.items():
        # Convert key to an actual data_idx
        idx = e13.split_seq(key)

        # Check if idx is not empty
        if not idx:
            raise e13.InputError("Model data contains a data point with no "
                                 "identifier!")

        # Convert value to an actual data point
        data = e13.split_seq(value)

        # Check if provided data value is valid
        val = check_vals(data[0], 'data_val%s' % (idx), 'float')

        # Extract data error and space
        # If length is two, centered error and no data space were given
        if(len(data) == 2):
            err = [check_vals(data[1], 'data_err%s' % (idx), 'float',
                              'pos')]*2
            spc = 'lin'

        # If length is three, there are two possibilities
        elif(len(data) == 3):
            # If the third column contains a string, it is the data space
            if isinstance(data[2], str):
                err = [check_vals(data[1], 'data_err%s' % (idx), 'float',
                                  'pos')]*2
                spc = data[2]

            # If the third column contains no string, it is error interval
            else:
                err = check_vals(data[1:3], 'data_err%s' % (idx), 'float',
                                 'pos')
                spc = 'lin'

        # If length is four+, error interval and data space were given
        else:
            err = check_vals(data[1:3], 'data_err%s' % (idx), 'float', 'pos')
            spc = data[3]

        # Check if valid data space has been provided
        spc = str(spc).replace("'", '').replace('"', '')
        if spc.lower() in ('lin', 'linear'):
            spc = 'lin'
        elif spc.lower() in ('log', 'log10', 'log_10'):
            spc = 'log10'
        elif spc.lower() in ('ln', 'loge', 'log_e'):
            spc = 'ln'
        else:
            raise ValueError("Input argument 'data_spc%s' is invalid (%r)!"
                             % (idx, spc))

        # Save data identifier as tuple or single element
        if(len(idx) == 1):
            idx = idx[0]
        else:
            idx = tuple(idx)

        # Add entire data point to data_dict
        data_dict[idx] = [val, *err, spc]

    # Return data_dict
    return(data_dict)


# This function converts provided model parameters into format used by PRISM
def convert_parameters(model_parameters):
    """
    Converts the provided `model_parameters` into a full parameters dict,
    taking into account all formatting options, and returns it.

    This function can be used externally to check how the provided
    `model_parameters` would be interpreted when provided to the
    :class:`~prism.modellink.ModelLink` subclass. Its output can be used for
    the 'model_parameters' input argument.

    Parameters
    ----------
    model_parameters : array_like, dict or str
        Anything that can be converted to a dict that provides model
        parameters information.

    Returns
    -------
    par_dict : dict
        Dict with the provided `model_parameters` converted to its full
        format.

    """

    # If a parameter file is given
    if isinstance(model_parameters, str):
        # Obtain absolute path to given file
        par_file = path.abspath(model_parameters)

        # Read the parameter file in as a string
        pars = np.genfromtxt(par_file, dtype=(str), delimiter=':',
                             autostrip=True)

        # Make sure that pars is 2D
        pars = np_array(pars, ndmin=2)

        # Convert read-in parameters to dict
        model_parameters = sdict(pars)

    # If a parameter dict is given
    elif isinstance(model_parameters, dict):
        model_parameters = sdict(model_parameters)

    # If anything else is given
    else:
        # Check if it can be converted to a dict
        try:
            model_parameters = sdict(model_parameters)
        except Exception:
            # 'from None': the low-level conversion error adds no information
            raise TypeError("Input model parameters cannot be converted to"
                            " type 'dict'!") from None

    # Initialize empty par_dict
    par_dict = sdict()

    # Loop over all items in model_parameters
    for name, values_str in model_parameters.items():
        # Convert values_str to values
        values = e13.split_seq(values_str)

        # Check if provided name is a string
        name = check_vals(name, 'par_name[%r]' % (name), 'str')

        # Check if provided range consists of two floats
        par_rng = check_vals(values[:2], 'par_rng[%r]' % (name), 'float')

        # Check if provided lower bound is lower than the upper bound
        if(par_rng[0] >= par_rng[1]):
            raise ValueError("Input argument 'par_rng[%r]' does not define a "
                             "valid parameter range (%f !< %f)!"
                             % (name, par_rng[0], par_rng[1]))

        # Check if a float parameter estimate was provided
        try:
            est = check_vals(values[2], 'par_est[%r]' % (name), 'float')
        # If no estimate was provided, save it as None
        except IndexError:
            est = None
        # If no float was provided, check if it was None
        except TypeError as error:
            # If it is None, save it as such
            if(str(values[2]).lower() == 'none'):
                est = None
            # If it is not None, reraise the previous error
            else:
                raise error
        # If a float was provided, check if it is within parameter range
        else:
            # BUGFIX: compare against the validated float bounds in 'par_rng'
            # instead of the raw 'values' items, which may not be floats
            if not(par_rng[0] <= est <= par_rng[1]):
                raise ValueError("Input argument 'par_est[%r]' is outside "
                                 "of defined parameter range!"
                                 % (name))

        # Add parameter to par_dict
        par_dict[name] = [*par_rng, est]

    # Return par_dict
    return(par_dict)


# This function tests a given ModelLink subclass
# TODO: Are there any more tests that can be done here?
def test_subclass(subclass, *args, **kwargs):
    """
    Tests a provided :class:`~prism.modellink.ModelLink` `subclass` by
    initializing it with the given `args` and `kwargs` and checking if all
    required methods can be properly called.

    This function needs to be called by all MPI ranks.

    Parameters
    ----------
    subclass : :class:`~prism.modellink.ModelLink` subclass
        The :class:`~prism.modellink.ModelLink` subclass that requires
        testing.
    args : positional arguments
        Positional arguments that need to be provided to the constructor of
        the `subclass`.
    kwargs : keyword arguments
        Keyword arguments that need to be provided to the constructor of the
        `subclass`.

    Returns
    -------
    modellink_obj : :obj:`~prism.modellink.ModelLink` object
        Instance of the provided `subclass` if all tests pass successfully.
        Specific exceptions are raised if a test fails.

    Note
    ----
    Depending on the complexity of the model wrapped in the given `subclass`,
    this function may take a while to execute.

    """

    # Import ModelLink class
    from prism.modellink import ModelLink

    # Check if provided subclass is a class
    if not isclass(subclass):
        raise e13.InputError("Input argument 'subclass' must be a class!")

    # Check if provided subclass is a subclass of ModelLink
    if not issubclass(subclass, ModelLink):
        raise TypeError("Input argument 'subclass' must be a subclass of the "
                        "ModelLink class!")

    # Try to initialize provided subclass
    try:
        modellink_obj = subclass(*args, **kwargs)
    except Exception as error:
        # Chain the original error so the root cause stays visible
        raise e13.InputError("Input argument 'subclass' cannot be initialized!"
                             " (%s)" % (error)) from error

    # Check if modellink_obj was initialized properly
    if not e13.check_instance(modellink_obj, ModelLink):
        obj_name = modellink_obj.__class__.__name__
        raise e13.InputError("Provided ModelLink subclass %r was not "
                             "initialized properly! Make sure that %r calls "
                             "the super constructor during initialization!"
                             % (obj_name, obj_name))

    # Obtain list of arguments call_model should take
    call_model_args = list(signature(ModelLink.call_model).parameters)
    call_model_args.remove('self')

    # Check if call_model takes the correct arguments
    obj_call_model_args = dict(signature(modellink_obj.call_model).parameters)
    for arg in call_model_args:
        if arg not in obj_call_model_args.keys():
            raise e13.InputError("The 'call_model()'-method in provided "
                                 "ModelLink subclass %r does not take required"
                                 " input argument %r!"
                                 % (modellink_obj._name, arg))
        else:
            obj_call_model_args.pop(arg)

    # Check if call_model takes any other arguments
    for arg, par in obj_call_model_args.items():
        # If this parameter has no default value and is not *args or **kwargs
        if(par.default == _empty and par.kind != _VAR_POSITIONAL and
           par.kind != _VAR_KEYWORD):
            # Raise error
            raise e13.InputError("The 'call_model()'-method in provided "
                                 "ModelLink subclass %r takes an unknown "
                                 "non-optional input argument %r!"
                                 % (modellink_obj._name, arg))

    # Obtain list of arguments get_md_var should take
    get_md_var_args = list(signature(ModelLink.get_md_var).parameters)
    get_md_var_args.remove('self')

    # Check if get_md_var takes the correct arguments
    obj_get_md_var_args = dict(signature(modellink_obj.get_md_var).parameters)
    for arg in get_md_var_args:
        if arg not in obj_get_md_var_args.keys():
            raise e13.InputError("The 'get_md_var()'-method in provided "
                                 "ModelLink subclass %r does not take required"
                                 " input argument %r!"
                                 % (modellink_obj._name, arg))
        else:
            obj_get_md_var_args.pop(arg)

    # Check if get_md_var takes any other arguments
    for arg, par in obj_get_md_var_args.items():
        # If this parameter has no default value and is not *args or **kwargs
        if(par.default == _empty and par.kind != _VAR_POSITIONAL and
           par.kind != _VAR_KEYWORD):
            # Raise an error
            raise e13.InputError("The 'get_md_var()'-method in provided "
                                 "ModelLink subclass %r takes an unknown "
                                 "non-optional input argument %r!"
                                 % (modellink_obj._name, arg))

    # Set MPI intra-communicator
    comm = get_HybridComm_obj()

    # Obtain random sam_set on controller
    if not comm._rank:
        sam_set = modellink_obj._to_par_space(rand(1, modellink_obj._n_par))
    # Workers get dummy sam_set
    else:
        sam_set = []

    # Broadcast random sam_set to workers
    sam_set = comm.bcast(sam_set, 0)

    # Try to evaluate sam_set in the model
    try:
        # Check who needs to call the model
        if not comm._rank or modellink_obj._MPI_call:
            # Do multi-call
            if modellink_obj._multi_call:
                mod_set = modellink_obj.call_model(
                    emul_i=0,
                    par_set=modellink_obj._get_sam_dict(sam_set),
                    data_idx=modellink_obj._data_idx)

            # Single-call
            else:
                # Initialize mod_set
                mod_set = np.zeros([sam_set.shape[0], modellink_obj._n_data])

                # Loop over all samples in sam_set
                for i, par_set in enumerate(sam_set):
                    mod_set[i] = modellink_obj.call_model(
                        emul_i=0,
                        par_set=modellink_obj._get_sam_dict(par_set),
                        data_idx=modellink_obj._data_idx)

    # If call_model was not overridden, catch NotImplementedError
    except NotImplementedError:
        # 'from None': the original traceback is pure noise for the user
        raise NotImplementedError("Provided ModelLink subclass %r has no "
                                  "user-written 'call_model()'-method!"
                                  % (modellink_obj._name)) from None

    # If successful, check if obtained mod_set has correct shape
    if not comm._rank:
        mod_set = modellink_obj._check_mod_set(mod_set, 'mod_set')

    # Check if the model discrepancy variance can be obtained
    try:
        md_var = modellink_obj.get_md_var(
            emul_i=0,
            par_set=modellink_obj._get_sam_dict(sam_set[0]),
            data_idx=modellink_obj._data_idx)

    # If get_md_var was not overridden, catch NotImplementedError
    except NotImplementedError:
        warn_msg = ("Provided ModelLink subclass %r has no user-written "
                    "'get_md_var()'-method! Default model discrepancy variance"
                    " description would be used instead!"
                    % (modellink_obj._name))
        warnings.warn(warn_msg, RequestWarning, stacklevel=2)

    # If successful, check if obtained md_var has correct shape
    else:
        md_var = modellink_obj._check_md_var(md_var, 'md_var')

    # Return modellink_obj
    return(modellink_obj)
prism/modellink/utils.py
# %% IMPORTS # Built-in imports from inspect import _VAR_KEYWORD, _VAR_POSITIONAL, _empty, isclass, signature from os import path import warnings # Package imports import e13tools as e13 from mpi4pyd.MPI import get_HybridComm_obj import numpy as np from numpy.random import rand from sortedcontainers import SortedDict as sdict # PRISM imports from prism._internal import RequestWarning, check_vals, np_array # All declaration __all__ = ['convert_data', 'convert_parameters', 'test_subclass'] # %% UTILITY FUNCTIONS # This function converts provided model data into format used by PRISM def convert_data(model_data): """ Converts the provided `model_data` into a full data dict, taking into account all formatting options, and returns it. This function can be used externally to check how the provided `model_data` would be interpreted when provided to the :class:`~prism.modellink.ModelLink` subclass. Its output can be used for the 'model_data' input argument. Parameters ---------- model_data : array_like, dict or str Anything that can be converted to a dict that provides model data information. Returns ------- data_dict : dict Dict with the provided `model_data` converted to its full format. 
""" # If a data file is given if isinstance(model_data, str): # Obtain absolute path to given file data_file = path.abspath(model_data) # Read the data file in as a string data_points = np.genfromtxt(data_file, dtype=(str), delimiter=':', autostrip=True) # Make sure that data_points is 2D data_points = np_array(data_points, ndmin=2) # Convert read-in data to dict model_data = dict(data_points) # If a data dict is given elif isinstance(model_data, dict): model_data = dict(model_data) # If anything else is given else: # Check if it can be converted to a dict try: model_data = dict(model_data) except Exception: raise TypeError("Input model data cannot be converted to type " "'dict'!") # Make empty data_dict data_dict = dict() # Loop over all items in model_data for key, value in model_data.items(): # Convert key to an actual data_idx idx = e13.split_seq(key) # Check if tmp_idx is not empty if not idx: raise e13.InputError("Model data contains a data point with no " "identifier!") # Convert value to an actual data point data = e13.split_seq(value) # Check if provided data value is valid val = check_vals(data[0], 'data_val%s' % (idx), 'float') # Extract data error and space # If length is two, centered error and no data space were given if(len(data) == 2): err = [check_vals(data[1], 'data_err%s' % (idx), 'float', 'pos')]*2 spc = 'lin' # If length is three, there are two possibilities elif(len(data) == 3): # If the third column contains a string, it is the data space if isinstance(data[2], str): err = [check_vals(data[1], 'data_err%s' % (idx), 'float', 'pos')]*2 spc = data[2] # If the third column contains no string, it is error interval else: err = check_vals(data[1:3], 'data_err%s' % (idx), 'float', 'pos') spc = 'lin' # If length is four+, error interval and data space were given else: err = check_vals(data[1:3], 'data_err%s' % (idx), 'float', 'pos') spc = data[3] # Check if valid data space has been provided spc = str(spc).replace("'", '').replace('"', '') if 
spc.lower() in ('lin', 'linear'): spc = 'lin' elif spc.lower() in ('log', 'log10', 'log_10'): spc = 'log10' elif spc.lower() in ('ln', 'loge', 'log_e'): spc = 'ln' else: raise ValueError("Input argument 'data_spc%s' is invalid (%r)!" % (idx, spc)) # Save data identifier as tuple or single element if(len(idx) == 1): idx = idx[0] else: idx = tuple(idx) # Add entire data point to data_dict data_dict[idx] = [val, *err, spc] # Return data_dict return(data_dict) # This function converts provided model parameters into format used by PRISM def convert_parameters(model_parameters): """ Converts the provided `model_parameters` into a full parameters dict, taking into account all formatting options, and returns it. This function can be used externally to check how the provided `model_parameters` would be interpreted when provided to the :class:`~prism.modellink.ModelLink` subclass. Its output can be used for the 'model_parameters' input argument. Parameters ---------- model_parameters : array_like, dict or str Anything that can be converted to a dict that provides model parameters information. Returns ------- par_dict : dict Dict with the provided `model_parameters` converted to its full format. 
""" # If a parameter file is given if isinstance(model_parameters, str): # Obtain absolute path to given file par_file = path.abspath(model_parameters) # Read the parameter file in as a string pars = np.genfromtxt(par_file, dtype=(str), delimiter=':', autostrip=True) # Make sure that pars is 2D pars = np_array(pars, ndmin=2) # Convert read-in parameters to dict model_parameters = sdict(pars) # If a parameter dict is given elif isinstance(model_parameters, dict): model_parameters = sdict(model_parameters) # If anything else is given else: # Check if it can be converted to a dict try: model_parameters = sdict(model_parameters) except Exception: raise TypeError("Input model parameters cannot be converted to" " type 'dict'!") # Initialize empty par_dict par_dict = sdict() # Loop over all items in model_parameters for name, values_str in model_parameters.items(): # Convert values_str to values values = e13.split_seq(values_str) # Check if provided name is a string name = check_vals(name, 'par_name[%r]' % (name), 'str') # Check if provided range consists of two floats par_rng = check_vals(values[:2], 'par_rng[%r]' % (name), 'float') # Check if provided lower bound is lower than the upper bound if(par_rng[0] >= par_rng[1]): raise ValueError("Input argument 'par_rng[%r]' does not define a " "valid parameter range (%f !< %f)!" % (name, par_rng[0], par_rng[1])) # Check if a float parameter estimate was provided try: est = check_vals(values[2], 'par_est[%r]' % (name), 'float') # If no estimate was provided, save it as None except IndexError: est = None # If no float was provided, check if it was None except TypeError as error: # If it is None, save it as such if(str(values[2]).lower() == 'none'): est = None # If it is not None, reraise the previous error else: raise error # If a float was provided, check if it is within parameter range else: if not(values[0] <= est <= values[1]): raise ValueError("Input argument 'par_est[%r]' is outside " "of defined parameter range!" 
% (name)) # Add parameter to par_dict par_dict[name] = [*par_rng, est] # Return par_dict return(par_dict) # This function tests a given ModelLink subclass # TODO: Are there any more tests that can be done here? def test_subclass(subclass, *args, **kwargs): """ Tests a provided :class:`~prism.modellink.ModelLink` `subclass` by initializing it with the given `args` and `kwargs` and checking if all required methods can be properly called. This function needs to be called by all MPI ranks. Parameters ---------- subclass : :class:`~prism.modellink.ModelLink` subclass The :class:`~prism.modellink.ModelLink` subclass that requires testing. args : positional arguments Positional arguments that need to be provided to the constructor of the `subclass`. kwargs : keyword arguments Keyword arguments that need to be provided to the constructor of the `subclass`. Returns ------- modellink_obj : :obj:`~prism.modellink.ModelLink` object Instance of the provided `subclass` if all tests pass successfully. Specific exceptions are raised if a test fails. Note ---- Depending on the complexity of the model wrapped in the given `subclass`, this function may take a while to execute. """ # Import ModelLink class from prism.modellink import ModelLink # Check if provided subclass is a class if not isclass(subclass): raise e13.InputError("Input argument 'subclass' must be a class!") # Check if provided subclass is a subclass of ModelLink if not issubclass(subclass, ModelLink): raise TypeError("Input argument 'subclass' must be a subclass of the " "ModelLink class!") # Try to initialize provided subclass try: modellink_obj = subclass(*args, **kwargs) except Exception as error: raise e13.InputError("Input argument 'subclass' cannot be initialized!" " (%s)" % (error)) # Check if modellink_obj was initialized properly if not e13.check_instance(modellink_obj, ModelLink): obj_name = modellink_obj.__class__.__name__ raise e13.InputError("Provided ModelLink subclass %r was not " "initialized properly! 
Make sure that %r calls " "the super constructor during initialization!" % (obj_name, obj_name)) # Obtain list of arguments call_model should take call_model_args = list(signature(ModelLink.call_model).parameters) call_model_args.remove('self') # Check if call_model takes the correct arguments obj_call_model_args = dict(signature(modellink_obj.call_model).parameters) for arg in call_model_args: if arg not in obj_call_model_args.keys(): raise e13.InputError("The 'call_model()'-method in provided " "ModelLink subclass %r does not take required" " input argument %r!" % (modellink_obj._name, arg)) else: obj_call_model_args.pop(arg) # Check if call_model takes any other arguments for arg, par in obj_call_model_args.items(): # If this parameter has no default value and is not *args or **kwargs if(par.default == _empty and par.kind != _VAR_POSITIONAL and par.kind != _VAR_KEYWORD): # Raise error raise e13.InputError("The 'call_model()'-method in provided " "ModelLink subclass %r takes an unknown " "non-optional input argument %r!" % (modellink_obj._name, arg)) # Obtain list of arguments get_md_var should take get_md_var_args = list(signature(ModelLink.get_md_var).parameters) get_md_var_args.remove('self') # Check if get_md_var takes the correct arguments obj_get_md_var_args = dict(signature(modellink_obj.get_md_var).parameters) for arg in get_md_var_args: if arg not in obj_get_md_var_args.keys(): raise e13.InputError("The 'get_md_var()'-method in provided " "ModelLink subclass %r does not take required" " input argument %r!" 
% (modellink_obj._name, arg)) else: obj_get_md_var_args.pop(arg) # Check if get_md_var takes any other arguments for arg, par in obj_get_md_var_args.items(): # If this parameter has no default value and is not *args or **kwargs if(par.default == _empty and par.kind != _VAR_POSITIONAL and par.kind != _VAR_KEYWORD): # Raise an error raise e13.InputError("The 'get_md_var()'-method in provided " "ModelLink subclass %r takes an unknown " "non-optional input argument %r!" % (modellink_obj._name, arg)) # Set MPI intra-communicator comm = get_HybridComm_obj() # Obtain random sam_set on controller if not comm._rank: sam_set = modellink_obj._to_par_space(rand(1, modellink_obj._n_par)) # Workers get dummy sam_set else: sam_set = [] # Broadcast random sam_set to workers sam_set = comm.bcast(sam_set, 0) # Try to evaluate sam_set in the model try: # Check who needs to call the model if not comm._rank or modellink_obj._MPI_call: # Do multi-call if modellink_obj._multi_call: mod_set = modellink_obj.call_model( emul_i=0, par_set=modellink_obj._get_sam_dict(sam_set), data_idx=modellink_obj._data_idx) # Single-call else: # Initialize mod_set mod_set = np.zeros([sam_set.shape[0], modellink_obj._n_data]) # Loop over all samples in sam_set for i, par_set in enumerate(sam_set): mod_set[i] = modellink_obj.call_model( emul_i=0, par_set=modellink_obj._get_sam_dict(par_set), data_idx=modellink_obj._data_idx) # If call_model was not overridden, catch NotImplementedError except NotImplementedError: raise NotImplementedError("Provided ModelLink subclass %r has no " "user-written 'call_model()'-method!" 
% (modellink_obj._name)) # If successful, check if obtained mod_set has correct shape if not comm._rank: mod_set = modellink_obj._check_mod_set(mod_set, 'mod_set') # Check if the model discrepancy variance can be obtained try: md_var = modellink_obj.get_md_var( emul_i=0, par_set=modellink_obj._get_sam_dict(sam_set[0]), data_idx=modellink_obj._data_idx) # If get_md_var was not overridden, catch NotImplementedError except NotImplementedError: warn_msg = ("Provided ModelLink subclass %r has no user-written " "'get_md_var()'-method! Default model discrepancy variance" " description would be used instead!" % (modellink_obj._name)) warnings.warn(warn_msg, RequestWarning, stacklevel=2) # If successful, check if obtained md_var has correct shape else: md_var = modellink_obj._check_md_var(md_var, 'md_var') # Return modellink_obj return(modellink_obj)
0.830113
0.466603
from mathics.builtin.base import ( Builtin, Test, ) from mathics.builtin.lists import list_boxes from mathics.core.expression import Expression from mathics.core.atoms import Integer from mathics.core.symbols import Symbol, SymbolList, SymbolTrue from mathics.core.systemsymbols import ( SymbolAssociation, SymbolMakeBoxes, SymbolRowBox, ) from mathics.core.attributes import hold_all_complete, protected class Association(Builtin): """ <dl> <dt>'Association[$key1$ -> $val1$, $key2$ -> $val2$, ...]' <dt>'<|$key1$ -> $val1$, $key2$ -> $val2$, ...|>' <dd> represents an association between keys and values. </dl> 'Association' is the head of associations: >> Head[<|a -> x, b -> y, c -> z|>] = Association >> <|a -> x, b -> y|> = <|a -> x, b -> y|> >> Association[{a -> x, b -> y}] = <|a -> x, b -> y|> Associations can be nested: >> <|a -> x, b -> y, <|a -> z, d -> t|>|> = <|a -> z, b -> y, d -> t|> #> <|a -> x, b -> y, c -> <|d -> t|>|> = <|a -> x, b -> y, c -> <|d -> t|>|> #> %["s"] = Missing[KeyAbsent, s] #> <|a -> x, b + c -> y, {<|{}|>, a -> {z}}|> = <|a -> {z}, b + c -> y|> #> %[a] = {z} #> <|"x" -> 1, {y} -> 1|> = <|x -> 1, {y} -> 1|> #> %["x"] = 1 #> <|<|a -> v|> -> x, <|b -> y, a -> <|c -> z|>, {}, <||>|>, {d}|>[c] = Association[Association[a -> v] -> x, Association[b -> y, a -> Association[c -> z], {}, Association[]], {d}][c] #> <|<|a -> v|> -> x, <|b -> y, a -> <|c -> z|>, {d}|>, {}, <||>|>[a] = Association[Association[a -> v] -> x, Association[b -> y, a -> Association[c -> z], {d}], {}, Association[]][a] #> <|<|a -> v|> -> x, <|b -> y, a -> <|c -> z, {d}|>, {}, <||>|>, {}, <||>|> = <|<|a -> v|> -> x, b -> y, a -> Association[c -> z, {d}]|> #> %[a] = Association[c -> z, {d}] #> <|a -> x, b -> y, c -> <|d -> t|>|> // ToBoxes = RowBox[{<|, RowBox[{RowBox[{a, ->, x}], ,, RowBox[{b, ->, y}], ,, RowBox[{c, ->, RowBox[{<|, RowBox[{d, ->, t}], |>}]}]}], |>}] #> Association[a -> x, b -> y, c -> Association[d -> t, Association[e -> u]]] // ToBoxes = RowBox[{<|, 
RowBox[{RowBox[{a, ->, x}], ,, RowBox[{b, ->, y}], ,, RowBox[{c, ->, RowBox[{<|, RowBox[{RowBox[{d, ->, t}], ,, RowBox[{e, ->, u}]}], |>}]}]}], |>}] """ error_idx = 0 attributes = hold_all_complete | protected summary_text = "an association between keys and values" def apply_makeboxes(self, rules, f, evaluation): """MakeBoxes[<|rules___|>, f:StandardForm|TraditionalForm|OutputForm|InputForm]""" def validate(exprs): for expr in exprs: if expr.has_form(("Rule", "RuleDelayed"), 2): pass elif expr.has_form(("List", "Association"), None): if not validate(expr._elements): return False else: return False return True rules = rules.get_sequence() if self.error_idx == 0 and validate(rules) is True: expr = Expression( SymbolRowBox, Expression(SymbolList, *list_boxes(rules, f, "<|", "|>")) ) else: self.error_idx += 1 symbol = Expression(SymbolMakeBoxes, SymbolAssociation, f) expr = Expression( SymbolRowBox, Expression(SymbolList, symbol, *list_boxes(rules, f, "[", "]")), ) expr = expr.evaluate(evaluation) if self.error_idx > 0: self.error_idx -= 1 return expr def apply(self, rules, evaluation): "Association[rules__]" def make_flatten(exprs, rules_dictionary: dict = {}): for expr in exprs: if expr.has_form(("Rule", "RuleDelayed"), 2): key = expr._elements[0].evaluate(evaluation) value = expr._elements[1].evaluate(evaluation) rules_dictionary[key] = Expression(expr.get_head(), key, value) elif expr.has_form(("List", "Association"), None): make_flatten(expr._elements, rules_dictionary) else: raise TypeError return rules_dictionary.values() try: return Expression(SymbolAssociation, *make_flatten(rules.get_sequence())) except TypeError: return None def apply_key(self, rules, key, evaluation): "Association[rules__][key_]" def find_key(exprs, rules_dictionary: dict = {}): for expr in exprs: if expr.has_form(("Rule", "RuleDelayed"), 2): if expr._elements[0] == key: rules_dictionary[key] = expr._elements[1] elif expr.has_form(("List", "Association"), None): find_key(expr._elements) 
else: raise TypeError return rules_dictionary try: result = find_key(rules.get_sequence()) return ( result[key] if result else Expression("Missing", Symbol("KeyAbsent"), key) ) except TypeError: return None class AssociationQ(Test): """ <dl> <dt>'AssociationQ[$expr$]' <dd>return True if $expr$ is a valid Association object, and False otherwise. </dl> >> AssociationQ[<|a -> 1, b :> 2|>] = True >> AssociationQ[<|a, b|>] = False """ summary_text = "test if an expression is a valid association" def test(self, expr): def validate(leaves): for leaf in leaves: if leaf.has_form(("Rule", "RuleDelayed"), 2): pass elif leaf.has_form(("List", "Association"), None): if not validate(leaf.leaves): return False else: return False return True return expr.get_head_name() == "System`Association" and validate(expr.leaves) class Keys(Builtin): """ <dl> <dt>'Keys[<|$key1$ -> $val1$, $key2$ -> $val2$, ...|>]' <dd>return a list of the keys $keyi$ in an association. <dt>'Keys[{$key1$ -> $val1$, $key2$ -> $val2$, ...}]' <dd>return a list of the $keyi$ in a list of rules. </dl> >> Keys[<|a -> x, b -> y|>] = {a, b} >> Keys[{a -> x, b -> y}] = {a, b} Keys automatically threads over lists: >> Keys[{<|a -> x, b -> y|>, {w -> z, {}}}] = {{a, b}, {w, {}}} Keys are listed in the order of their appearance: >> Keys[{c -> z, b -> y, a -> x}] = {c, b, a} #> Keys[a -> x] = a #> Keys[{a -> x, a -> y, {a -> z, <|b -> t|>, <||>, {}}}] = {a, a, {a, {b}, {}, {}}} #> Keys[{a -> x, a -> y, <|a -> z, {b -> t}, <||>, {}|>}] = {a, a, {a, b}} #> Keys[<|a -> x, a -> y, <|a -> z, <|b -> t|>, <||>, {}|>|>] = {a, b} #> Keys[<|a -> x, a -> y, {a -> z, {b -> t}, <||>, {}}|>] = {a, b} #> Keys[<|a -> x, <|a -> y, b|>|>] : The argument Association[a -> x, Association[a -> y, b]] is not a valid Association or a list of rules. = Keys[Association[a -> x, Association[a -> y, b]]] #> Keys[<|a -> x, {a -> y, b}|>] : The argument Association[a -> x, {a -> y, b}] is not a valid Association or a list of rules. 
= Keys[Association[a -> x, {a -> y, b}]] #> Keys[{a -> x, <|a -> y, b|>}] : The argument Association[a -> y, b] is not a valid Association or a list of rules. = Keys[{a -> x, Association[a -> y, b]}] #> Keys[{a -> x, {a -> y, b}}] : The argument b is not a valid Association or a list of rules. = Keys[{a -> x, {a -> y, b}}] #> Keys[a -> x, b -> y] : Keys called with 2 arguments; 1 argument is expected. = Keys[a -> x, b -> y] """ attributes = protected messages = { "argx": "Keys called with `1` arguments; 1 argument is expected.", "invrl": "The argument `1` is not a valid Association or a list of rules.", } summary_text = "list association keys" def apply(self, rules, evaluation): "Keys[rules___]" def get_keys(expr): if expr.has_form(("Rule", "RuleDelayed"), 2): return expr.leaves[0] elif expr.has_form("List", None) or ( expr.has_form("Association", None) and AssociationQ(expr).evaluate(evaluation) is SymbolTrue ): return Expression(SymbolList, *[get_keys(leaf) for leaf in expr.leaves]) else: evaluation.message("Keys", "invrl", expr) raise TypeError rules = rules.get_sequence() if len(rules) != 1: return evaluation.message("Keys", "argx", Integer(len(rules))) try: return get_keys(rules[0]) except TypeError: return None class Lookup(Builtin): """ <dl> <dt>Lookup[$assoc$, $key$] <dd> looks up the value associated with $key$ in the association $assoc$, or Missing[$KeyAbsent$]. </dl> """ attributes = hold_all_complete rules = { "Lookup[assoc_?AssociationQ, key_, default_]": "FirstCase[assoc, _[Verbatim[key], val_] :> val, default]", "Lookup[assoc_?AssociationQ, key_]": 'Lookup[assoc, key, Missing["KeyAbsent", key]]', } summary_text = "perform lookup of a value by key, returning a specified default if it is not found" class Missing(Builtin): """ <dl> <dd>'Missing[]' <dt> represents a data that is misssing. 
</dl> >> ElementData["Meitnerium","MeltingPoint"] = Missing[NotAvailable] """ summary_text = "symbolic representation of missing data" class Values(Builtin): """ <dl> <dt>'Values[<|$key1$ -> $val1$, $key2$ -> $val2$, ...|>]' <dd>return a list of the values $vali$ in an association. <dt>'Values[{$key1$ -> $val1$, $key2$ -> $val2$, ...}]' <dd>return a list of the $vali$ in a list of rules. </dl> >> Values[<|a -> x, b -> y|>] = {x, y} >> Values[{a -> x, b -> y}] = {x, y} Values automatically threads over lists: >> Values[{<|a -> x, b -> y|>, {c -> z, {}}}] = {{x, y}, {z, {}}} Values are listed in the order of their appearance: >> Values[{c -> z, b -> y, a -> x}] = {z, y, x} #> Values[a -> x] = x #> Values[{a -> x, a -> y, {a -> z, <|b -> t|>, <||>, {}}}] = {x, y, {z, {t}, {}, {}}} #> Values[{a -> x, a -> y, <|a -> z, {b -> t}, <||>, {}|>}] = {x, y, {z, t}} #> Values[<|a -> x, a -> y, <|a -> z, <|b -> t|>, <||>, {}|>|>] = {z, t} #> Values[<|a -> x, a -> y, {a -> z, {b -> t}, <||>, {}}|>] = {z, t} #> Values[<|a -> x, <|a -> y, b|>|>] : The argument Association[a -> x, Association[a -> y, b]] is not a valid Association or a list of rules. = Values[Association[a -> x, Association[a -> y, b]]] #> Values[<|a -> x, {a -> y, b}|>] : The argument Association[a -> x, {a -> y, b}] is not a valid Association or a list of rules. = Values[Association[a -> x, {a -> y, b}]] #> Values[{a -> x, <|a -> y, b|>}] : The argument {a -> x, Association[a -> y, b]} is not a valid Association or a list of rules. = Values[{a -> x, Association[a -> y, b]}] #> Values[{a -> x, {a -> y, b}}] : The argument {a -> x, {a -> y, b}} is not a valid Association or a list of rules. = Values[{a -> x, {a -> y, b}}] #> Values[a -> x, b -> y] : Values called with 2 arguments; 1 argument is expected. 
= Values[a -> x, b -> y] """ attributes = protected messages = { "argx": "Values called with `1` arguments; 1 argument is expected.", "invrl": "The argument `1` is not a valid Association or a list of rules.", } summary_text = "list association values" def apply(self, rules, evaluation): "Values[rules___]" def get_values(expr): if expr.has_form(("Rule", "RuleDelayed"), 2): return expr.leaves[1] elif expr.has_form("List", None) or ( expr.has_form("Association", None) and AssociationQ(expr).evaluate(evaluation) is Symbol("True") ): return Expression( SymbolList, *[get_values(leaf) for leaf in expr.leaves] ) else: raise TypeError rules = rules.get_sequence() if len(rules) != 1: return evaluation.message("Values", "argx", Integer(len(rules))) try: return get_values(rules[0]) except TypeError: return evaluation.message("Values", "invrl", rules[0])
mathics/builtin/list/associations.py
from mathics.builtin.base import ( Builtin, Test, ) from mathics.builtin.lists import list_boxes from mathics.core.expression import Expression from mathics.core.atoms import Integer from mathics.core.symbols import Symbol, SymbolList, SymbolTrue from mathics.core.systemsymbols import ( SymbolAssociation, SymbolMakeBoxes, SymbolRowBox, ) from mathics.core.attributes import hold_all_complete, protected class Association(Builtin): """ <dl> <dt>'Association[$key1$ -> $val1$, $key2$ -> $val2$, ...]' <dt>'<|$key1$ -> $val1$, $key2$ -> $val2$, ...|>' <dd> represents an association between keys and values. </dl> 'Association' is the head of associations: >> Head[<|a -> x, b -> y, c -> z|>] = Association >> <|a -> x, b -> y|> = <|a -> x, b -> y|> >> Association[{a -> x, b -> y}] = <|a -> x, b -> y|> Associations can be nested: >> <|a -> x, b -> y, <|a -> z, d -> t|>|> = <|a -> z, b -> y, d -> t|> #> <|a -> x, b -> y, c -> <|d -> t|>|> = <|a -> x, b -> y, c -> <|d -> t|>|> #> %["s"] = Missing[KeyAbsent, s] #> <|a -> x, b + c -> y, {<|{}|>, a -> {z}}|> = <|a -> {z}, b + c -> y|> #> %[a] = {z} #> <|"x" -> 1, {y} -> 1|> = <|x -> 1, {y} -> 1|> #> %["x"] = 1 #> <|<|a -> v|> -> x, <|b -> y, a -> <|c -> z|>, {}, <||>|>, {d}|>[c] = Association[Association[a -> v] -> x, Association[b -> y, a -> Association[c -> z], {}, Association[]], {d}][c] #> <|<|a -> v|> -> x, <|b -> y, a -> <|c -> z|>, {d}|>, {}, <||>|>[a] = Association[Association[a -> v] -> x, Association[b -> y, a -> Association[c -> z], {d}], {}, Association[]][a] #> <|<|a -> v|> -> x, <|b -> y, a -> <|c -> z, {d}|>, {}, <||>|>, {}, <||>|> = <|<|a -> v|> -> x, b -> y, a -> Association[c -> z, {d}]|> #> %[a] = Association[c -> z, {d}] #> <|a -> x, b -> y, c -> <|d -> t|>|> // ToBoxes = RowBox[{<|, RowBox[{RowBox[{a, ->, x}], ,, RowBox[{b, ->, y}], ,, RowBox[{c, ->, RowBox[{<|, RowBox[{d, ->, t}], |>}]}]}], |>}] #> Association[a -> x, b -> y, c -> Association[d -> t, Association[e -> u]]] // ToBoxes = RowBox[{<|, 
RowBox[{RowBox[{a, ->, x}], ,, RowBox[{b, ->, y}], ,, RowBox[{c, ->, RowBox[{<|, RowBox[{RowBox[{d, ->, t}], ,, RowBox[{e, ->, u}]}], |>}]}]}], |>}] """ error_idx = 0 attributes = hold_all_complete | protected summary_text = "an association between keys and values" def apply_makeboxes(self, rules, f, evaluation): """MakeBoxes[<|rules___|>, f:StandardForm|TraditionalForm|OutputForm|InputForm]""" def validate(exprs): for expr in exprs: if expr.has_form(("Rule", "RuleDelayed"), 2): pass elif expr.has_form(("List", "Association"), None): if not validate(expr._elements): return False else: return False return True rules = rules.get_sequence() if self.error_idx == 0 and validate(rules) is True: expr = Expression( SymbolRowBox, Expression(SymbolList, *list_boxes(rules, f, "<|", "|>")) ) else: self.error_idx += 1 symbol = Expression(SymbolMakeBoxes, SymbolAssociation, f) expr = Expression( SymbolRowBox, Expression(SymbolList, symbol, *list_boxes(rules, f, "[", "]")), ) expr = expr.evaluate(evaluation) if self.error_idx > 0: self.error_idx -= 1 return expr def apply(self, rules, evaluation): "Association[rules__]" def make_flatten(exprs, rules_dictionary: dict = {}): for expr in exprs: if expr.has_form(("Rule", "RuleDelayed"), 2): key = expr._elements[0].evaluate(evaluation) value = expr._elements[1].evaluate(evaluation) rules_dictionary[key] = Expression(expr.get_head(), key, value) elif expr.has_form(("List", "Association"), None): make_flatten(expr._elements, rules_dictionary) else: raise TypeError return rules_dictionary.values() try: return Expression(SymbolAssociation, *make_flatten(rules.get_sequence())) except TypeError: return None def apply_key(self, rules, key, evaluation): "Association[rules__][key_]" def find_key(exprs, rules_dictionary: dict = {}): for expr in exprs: if expr.has_form(("Rule", "RuleDelayed"), 2): if expr._elements[0] == key: rules_dictionary[key] = expr._elements[1] elif expr.has_form(("List", "Association"), None): find_key(expr._elements) 
else: raise TypeError return rules_dictionary try: result = find_key(rules.get_sequence()) return ( result[key] if result else Expression("Missing", Symbol("KeyAbsent"), key) ) except TypeError: return None class AssociationQ(Test): """ <dl> <dt>'AssociationQ[$expr$]' <dd>return True if $expr$ is a valid Association object, and False otherwise. </dl> >> AssociationQ[<|a -> 1, b :> 2|>] = True >> AssociationQ[<|a, b|>] = False """ summary_text = "test if an expression is a valid association" def test(self, expr): def validate(leaves): for leaf in leaves: if leaf.has_form(("Rule", "RuleDelayed"), 2): pass elif leaf.has_form(("List", "Association"), None): if not validate(leaf.leaves): return False else: return False return True return expr.get_head_name() == "System`Association" and validate(expr.leaves) class Keys(Builtin): """ <dl> <dt>'Keys[<|$key1$ -> $val1$, $key2$ -> $val2$, ...|>]' <dd>return a list of the keys $keyi$ in an association. <dt>'Keys[{$key1$ -> $val1$, $key2$ -> $val2$, ...}]' <dd>return a list of the $keyi$ in a list of rules. </dl> >> Keys[<|a -> x, b -> y|>] = {a, b} >> Keys[{a -> x, b -> y}] = {a, b} Keys automatically threads over lists: >> Keys[{<|a -> x, b -> y|>, {w -> z, {}}}] = {{a, b}, {w, {}}} Keys are listed in the order of their appearance: >> Keys[{c -> z, b -> y, a -> x}] = {c, b, a} #> Keys[a -> x] = a #> Keys[{a -> x, a -> y, {a -> z, <|b -> t|>, <||>, {}}}] = {a, a, {a, {b}, {}, {}}} #> Keys[{a -> x, a -> y, <|a -> z, {b -> t}, <||>, {}|>}] = {a, a, {a, b}} #> Keys[<|a -> x, a -> y, <|a -> z, <|b -> t|>, <||>, {}|>|>] = {a, b} #> Keys[<|a -> x, a -> y, {a -> z, {b -> t}, <||>, {}}|>] = {a, b} #> Keys[<|a -> x, <|a -> y, b|>|>] : The argument Association[a -> x, Association[a -> y, b]] is not a valid Association or a list of rules. = Keys[Association[a -> x, Association[a -> y, b]]] #> Keys[<|a -> x, {a -> y, b}|>] : The argument Association[a -> x, {a -> y, b}] is not a valid Association or a list of rules. 
= Keys[Association[a -> x, {a -> y, b}]] #> Keys[{a -> x, <|a -> y, b|>}] : The argument Association[a -> y, b] is not a valid Association or a list of rules. = Keys[{a -> x, Association[a -> y, b]}] #> Keys[{a -> x, {a -> y, b}}] : The argument b is not a valid Association or a list of rules. = Keys[{a -> x, {a -> y, b}}] #> Keys[a -> x, b -> y] : Keys called with 2 arguments; 1 argument is expected. = Keys[a -> x, b -> y] """ attributes = protected messages = { "argx": "Keys called with `1` arguments; 1 argument is expected.", "invrl": "The argument `1` is not a valid Association or a list of rules.", } summary_text = "list association keys" def apply(self, rules, evaluation): "Keys[rules___]" def get_keys(expr): if expr.has_form(("Rule", "RuleDelayed"), 2): return expr.leaves[0] elif expr.has_form("List", None) or ( expr.has_form("Association", None) and AssociationQ(expr).evaluate(evaluation) is SymbolTrue ): return Expression(SymbolList, *[get_keys(leaf) for leaf in expr.leaves]) else: evaluation.message("Keys", "invrl", expr) raise TypeError rules = rules.get_sequence() if len(rules) != 1: return evaluation.message("Keys", "argx", Integer(len(rules))) try: return get_keys(rules[0]) except TypeError: return None class Lookup(Builtin): """ <dl> <dt>Lookup[$assoc$, $key$] <dd> looks up the value associated with $key$ in the association $assoc$, or Missing[$KeyAbsent$]. </dl> """ attributes = hold_all_complete rules = { "Lookup[assoc_?AssociationQ, key_, default_]": "FirstCase[assoc, _[Verbatim[key], val_] :> val, default]", "Lookup[assoc_?AssociationQ, key_]": 'Lookup[assoc, key, Missing["KeyAbsent", key]]', } summary_text = "perform lookup of a value by key, returning a specified default if it is not found" class Missing(Builtin): """ <dl> <dd>'Missing[]' <dt> represents a data that is misssing. 
</dl> >> ElementData["Meitnerium","MeltingPoint"] = Missing[NotAvailable] """ summary_text = "symbolic representation of missing data" class Values(Builtin): """ <dl> <dt>'Values[<|$key1$ -> $val1$, $key2$ -> $val2$, ...|>]' <dd>return a list of the values $vali$ in an association. <dt>'Values[{$key1$ -> $val1$, $key2$ -> $val2$, ...}]' <dd>return a list of the $vali$ in a list of rules. </dl> >> Values[<|a -> x, b -> y|>] = {x, y} >> Values[{a -> x, b -> y}] = {x, y} Values automatically threads over lists: >> Values[{<|a -> x, b -> y|>, {c -> z, {}}}] = {{x, y}, {z, {}}} Values are listed in the order of their appearance: >> Values[{c -> z, b -> y, a -> x}] = {z, y, x} #> Values[a -> x] = x #> Values[{a -> x, a -> y, {a -> z, <|b -> t|>, <||>, {}}}] = {x, y, {z, {t}, {}, {}}} #> Values[{a -> x, a -> y, <|a -> z, {b -> t}, <||>, {}|>}] = {x, y, {z, t}} #> Values[<|a -> x, a -> y, <|a -> z, <|b -> t|>, <||>, {}|>|>] = {z, t} #> Values[<|a -> x, a -> y, {a -> z, {b -> t}, <||>, {}}|>] = {z, t} #> Values[<|a -> x, <|a -> y, b|>|>] : The argument Association[a -> x, Association[a -> y, b]] is not a valid Association or a list of rules. = Values[Association[a -> x, Association[a -> y, b]]] #> Values[<|a -> x, {a -> y, b}|>] : The argument Association[a -> x, {a -> y, b}] is not a valid Association or a list of rules. = Values[Association[a -> x, {a -> y, b}]] #> Values[{a -> x, <|a -> y, b|>}] : The argument {a -> x, Association[a -> y, b]} is not a valid Association or a list of rules. = Values[{a -> x, Association[a -> y, b]}] #> Values[{a -> x, {a -> y, b}}] : The argument {a -> x, {a -> y, b}} is not a valid Association or a list of rules. = Values[{a -> x, {a -> y, b}}] #> Values[a -> x, b -> y] : Values called with 2 arguments; 1 argument is expected. 
= Values[a -> x, b -> y] """ attributes = protected messages = { "argx": "Values called with `1` arguments; 1 argument is expected.", "invrl": "The argument `1` is not a valid Association or a list of rules.", } summary_text = "list association values" def apply(self, rules, evaluation): "Values[rules___]" def get_values(expr): if expr.has_form(("Rule", "RuleDelayed"), 2): return expr.leaves[1] elif expr.has_form("List", None) or ( expr.has_form("Association", None) and AssociationQ(expr).evaluate(evaluation) is Symbol("True") ): return Expression( SymbolList, *[get_values(leaf) for leaf in expr.leaves] ) else: raise TypeError rules = rules.get_sequence() if len(rules) != 1: return evaluation.message("Values", "argx", Integer(len(rules))) try: return get_values(rules[0]) except TypeError: return evaluation.message("Values", "invrl", rules[0])
0.626467
0.555254
import re import requests import os import csv # Fukcije url_to_text, zapisi_csv in zapisi_v_csv so pobrane z repozitorija od predmeta Programiranje 1. STEVILO_STRANI = 166 mapa = 'zajeti_podatki' linki = 'linki.txt' nepremicnine_txt = 'nepremicnine.txt' nepremicnine_csv = 'nepremicnine.csv' vzorec_linka = r'<meta itemprop="mainEntityOfPage" content="https://www.nepremicnine.net/oglasi-prodaja/(.*)" />' vzorec_lastnosti = ( r'<div class="more_info">Posredovanje: (.*?) ' r'\| Vrsta: (.*?) ' r'\| Regija: (?P<regija>.*?) ' r'\| Upravna enota: (?P<upravna_enota>.*?) ' r'\| Občina: (?P<obcina>.*?)' r'</div><div class="main-data">.*?' r'<div class="kratek" itemprop="description"><strong class="rdeca">(?P<kraj>.*?)</strong>, ' r'((?P<povrsina>[\d,.]*) m2, )?.*?' r'((?P<vrsta_hise>.*?), )?.*?' r'l. (?P<leto>[0-9]{4}),.*?' r'( adaptiran[oa] l. (?P<adaptirana>[0-9]{1,4}),)?.*?' r'( (?P<zemljisce>[\d.,]*) m2 zemljišča,)?.*?' r'(.ena:(.*?)? (?P<cena>[\d,.]* EUR(/m2)?))?<' ) def url_to_text(url): '''Funkcija, ki sprejme url naslov strani, stran pobere in pretvori v niz.''' try: vsebina_strani = requests.get(url) except requests.exceptions.ConnectionError: print('Prišlo je do napake pri povezovanju.') return None if vsebina_strani.status_code == requests.codes.ok: return vsebina_strani.text print('Težave pri vsebini strani') return None def zajemi_linke(mapa, datoteka_z_linki): '''Funkcija, ki s strani pobere linke do nepremičnin, ki jih kasneje uporabimo za pridobitev podatkov, in jih zapiše v datoteko.''' os.makedirs(mapa, exist_ok=True) pot = os.path.join(mapa, datoteka_z_linki) if os.path.exists(pot): # če obstaja datoteka z linki jo izbrišemo; datoteko pri vsaki uporabi na novo ustvarimo, da se izognemo nedelujočim linkom os.remove(pot) for stran in range(1, STEVILO_STRANI+1): url = f'https://www.nepremicnine.net/oglasi-prodaja/slovenija/hisa/{stran}/?s=1' vsebina = url_to_text(url) for link in re.findall(vzorec_linka, vsebina): with open(pot, 'a', encoding='utf-8') as f: 
print(link, file=f) return None def poberi_podatke(mapa, datoteka_z_linki, datoteka_z_nepremicninami): '''Funkcija, ki iz spletnih strani, navedenih v datoteki z linki, pobere podatke, jih zapiše v pomožno datoteko in vrne seznam slovarjev s podatki.''' zajemi_linke(mapa, datoteka_z_linki) oglasi = [] with open(os.path.join(mapa, datoteka_z_linki)) as f: for link in f: podatki_hise = {} text = url_to_text(f'https://www.nepremicnine.net/oglasi-prodaja/{link}') id_hise = re.findall(r'\d{7}', link)[0] # id hiše preberemo iz linka podatki_hise['id'] = id_hise lastnosti = re.search(vzorec_lastnosti, text, re.DOTALL) try: podatki_hise.update(lastnosti.groupdict()) except: pass oglasi.append(podatki_hise) with open(os.path.join(mapa, datoteka_z_nepremicninami), 'a', encoding='utf-8') as dat: # ustvari pomozno datoteko, v kateri so zapisani slovarji s podatki o nepremičninah, z namenom preverjanja pravilnosti pobranih podatkov print(podatki_hise, file=dat) return oglasi def zapisi_csv(glava, vrstice, mapa, datoteka_z_nepremicninami): pot = os.path.join(mapa, datoteka_z_nepremicninami) with open(pot, 'w', encoding='utf-8', newline='') as dat: writer = csv.DictWriter(dat, fieldnames=glava) writer.writeheader() for vrstica in vrstice: if vrstica['povrsina'] != None: vrstica['povrsina'] = float(vrstica['povrsina'].replace('.', '').replace(',', '.')) if vrstica['zemljisce'] != None: vrstica['zemljisce'] = float(vrstica['zemljisce'].replace('.', '').replace(',', '.')) if vrstica['cena'] != None: prebrana_cena = vrstica['cena'] cena = float(prebrana_cena.split(',')[0].replace('.', '')) if '/m2' in prebrana_cena: nova_cena = float(vrstica['povrsina'] * cena) vrstica['cena'] = nova_cena else: vrstica['cena'] = cena writer.writerow(vrstica) return None def zapisi_v_csv(oglasi, mapa, datoteka_z_nepremicninami): assert oglasi and (all(j.keys() == oglasi[0].keys() for j in oglasi)) zapisi_csv(oglasi[0].keys(), oglasi, mapa, datoteka_z_nepremicninami) def main(): podatki = 
poberi_podatke(mapa, linki, nepremicnine_txt) zapisi_v_csv(podatki, mapa, nepremicnine_csv) main()
zajemi_strani.py
import re import requests import os import csv # Fukcije url_to_text, zapisi_csv in zapisi_v_csv so pobrane z repozitorija od predmeta Programiranje 1. STEVILO_STRANI = 166 mapa = 'zajeti_podatki' linki = 'linki.txt' nepremicnine_txt = 'nepremicnine.txt' nepremicnine_csv = 'nepremicnine.csv' vzorec_linka = r'<meta itemprop="mainEntityOfPage" content="https://www.nepremicnine.net/oglasi-prodaja/(.*)" />' vzorec_lastnosti = ( r'<div class="more_info">Posredovanje: (.*?) ' r'\| Vrsta: (.*?) ' r'\| Regija: (?P<regija>.*?) ' r'\| Upravna enota: (?P<upravna_enota>.*?) ' r'\| Občina: (?P<obcina>.*?)' r'</div><div class="main-data">.*?' r'<div class="kratek" itemprop="description"><strong class="rdeca">(?P<kraj>.*?)</strong>, ' r'((?P<povrsina>[\d,.]*) m2, )?.*?' r'((?P<vrsta_hise>.*?), )?.*?' r'l. (?P<leto>[0-9]{4}),.*?' r'( adaptiran[oa] l. (?P<adaptirana>[0-9]{1,4}),)?.*?' r'( (?P<zemljisce>[\d.,]*) m2 zemljišča,)?.*?' r'(.ena:(.*?)? (?P<cena>[\d,.]* EUR(/m2)?))?<' ) def url_to_text(url): '''Funkcija, ki sprejme url naslov strani, stran pobere in pretvori v niz.''' try: vsebina_strani = requests.get(url) except requests.exceptions.ConnectionError: print('Prišlo je do napake pri povezovanju.') return None if vsebina_strani.status_code == requests.codes.ok: return vsebina_strani.text print('Težave pri vsebini strani') return None def zajemi_linke(mapa, datoteka_z_linki): '''Funkcija, ki s strani pobere linke do nepremičnin, ki jih kasneje uporabimo za pridobitev podatkov, in jih zapiše v datoteko.''' os.makedirs(mapa, exist_ok=True) pot = os.path.join(mapa, datoteka_z_linki) if os.path.exists(pot): # če obstaja datoteka z linki jo izbrišemo; datoteko pri vsaki uporabi na novo ustvarimo, da se izognemo nedelujočim linkom os.remove(pot) for stran in range(1, STEVILO_STRANI+1): url = f'https://www.nepremicnine.net/oglasi-prodaja/slovenija/hisa/{stran}/?s=1' vsebina = url_to_text(url) for link in re.findall(vzorec_linka, vsebina): with open(pot, 'a', encoding='utf-8') as f: 
print(link, file=f) return None def poberi_podatke(mapa, datoteka_z_linki, datoteka_z_nepremicninami): '''Funkcija, ki iz spletnih strani, navedenih v datoteki z linki, pobere podatke, jih zapiše v pomožno datoteko in vrne seznam slovarjev s podatki.''' zajemi_linke(mapa, datoteka_z_linki) oglasi = [] with open(os.path.join(mapa, datoteka_z_linki)) as f: for link in f: podatki_hise = {} text = url_to_text(f'https://www.nepremicnine.net/oglasi-prodaja/{link}') id_hise = re.findall(r'\d{7}', link)[0] # id hiše preberemo iz linka podatki_hise['id'] = id_hise lastnosti = re.search(vzorec_lastnosti, text, re.DOTALL) try: podatki_hise.update(lastnosti.groupdict()) except: pass oglasi.append(podatki_hise) with open(os.path.join(mapa, datoteka_z_nepremicninami), 'a', encoding='utf-8') as dat: # ustvari pomozno datoteko, v kateri so zapisani slovarji s podatki o nepremičninah, z namenom preverjanja pravilnosti pobranih podatkov print(podatki_hise, file=dat) return oglasi def zapisi_csv(glava, vrstice, mapa, datoteka_z_nepremicninami): pot = os.path.join(mapa, datoteka_z_nepremicninami) with open(pot, 'w', encoding='utf-8', newline='') as dat: writer = csv.DictWriter(dat, fieldnames=glava) writer.writeheader() for vrstica in vrstice: if vrstica['povrsina'] != None: vrstica['povrsina'] = float(vrstica['povrsina'].replace('.', '').replace(',', '.')) if vrstica['zemljisce'] != None: vrstica['zemljisce'] = float(vrstica['zemljisce'].replace('.', '').replace(',', '.')) if vrstica['cena'] != None: prebrana_cena = vrstica['cena'] cena = float(prebrana_cena.split(',')[0].replace('.', '')) if '/m2' in prebrana_cena: nova_cena = float(vrstica['povrsina'] * cena) vrstica['cena'] = nova_cena else: vrstica['cena'] = cena writer.writerow(vrstica) return None def zapisi_v_csv(oglasi, mapa, datoteka_z_nepremicninami): assert oglasi and (all(j.keys() == oglasi[0].keys() for j in oglasi)) zapisi_csv(oglasi[0].keys(), oglasi, mapa, datoteka_z_nepremicninami) def main(): podatki = 
poberi_podatke(mapa, linki, nepremicnine_txt) zapisi_v_csv(podatki, mapa, nepremicnine_csv) main()
0.133105
0.191214
import argparse import json import os import platform import subprocess import prepare_compile_cmd import prepare_compiler_info import prepare_analyzer_cmd def execute(cmd): print("Executing command: " + ' '.join(cmd)) try: proc = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding="utf-8", errors="ignore") out, err = proc.communicate() print("stdout:\n\n" + out.decode("utf-8")) print("stderr:\n\n" + err.decode("utf-8")) if proc.returncode != 0: print('Unsuccessful run: "' + ' '.join(cmd) + '"') raise Exception("Unsuccessful run of command.") return out except OSError: print('Failed to run: "' + ' '.join(cmd) + '"') raise def get_triple_arch(analyze_command_file): with open(analyze_command_file, encoding="utf-8", errors="ignore") as f: cmd = f.readline() cmd = cmd.split() for flag in cmd: if flag.startswith('--target='): return flag[9:].split('-')[0] # 9 == len('--target=') return platform.machine() if __name__ == '__main__': parser = argparse.ArgumentParser( description='Prepare all commands ' 'to execute in local environmennt for debugging.') parser.add_argument( '--sources_root', default='./sources-root', help="Path of the source root.") parser.add_argument( '--report_dir', default='..', help="Path of the report dir.") parser.add_argument( '--clang', required=True, help="Path to the clang binary.") parser.add_argument( '--clang_plugin_name', default=None, help="Name of the used clang plugin.") parser.add_argument( '--clang_plugin_path', default=None, help="Path to the used clang plugin.") args = parser.parse_args() compile_cmd_debug = "compile_cmd_DEBUG.json" with open(compile_cmd_debug, 'w', encoding="utf-8", errors="ignore") as f: f.write( json.dumps( prepare_compile_cmd.prepare( os.path.join(args.report_dir, "compile_cmd.json"), args.sources_root), indent=4)) compiler_info_debug = "compiler_info_DEBUG.json" with open(compiler_info_debug, 'w', encoding="utf-8", errors="ignore") as f: f.write( json.dumps( 
prepare_compiler_info.prepare( os.path.join(args.report_dir, "compiler_info.json"), args.sources_root), indent=4)) # ctu-collect out = execute(["CodeChecker", "analyze", "--ctu-collect", compile_cmd_debug, "--compiler-info-file", compiler_info_debug, "-o", "report_debug", "--verbose", "debug"]) analyzer_command_debug = "analyzer-command_DEBUG" target = get_triple_arch('./analyzer-command') with open(analyzer_command_debug, 'w', encoding="utf-8", errors="ignore") as f: f.write( prepare_analyzer_cmd.prepare( "./analyzer-command", prepare_analyzer_cmd.PathOptions( args.sources_root, args.clang, args.clang_plugin_name, args.clang_plugin_path, "./report_debug/ctu-dir/" + target))) print( "Preparation of files for debugging is done. " "Now you can execute the generated analyzer command. " "E.g. $ bash % s" % analyzer_command_debug)
scripts/debug_tools/prepare_all_cmd_for_ctu.py
import argparse
import json
import os
import platform
import subprocess

import prepare_compile_cmd
import prepare_compiler_info
import prepare_analyzer_cmd


def execute(cmd):
    """Run *cmd* as a subprocess, echo its output and return its stdout.

    Raises OSError when the command cannot be started, and a generic
    Exception when it exits with a non-zero return code.
    """
    print("Executing command: " + ' '.join(cmd))
    try:
        proc = subprocess.Popen(
            cmd,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            encoding="utf-8",
            errors="ignore")
        out, err = proc.communicate()

        # Popen was created in text mode (an 'encoding' was given), so
        # communicate() already returns 'str' objects. The original code
        # called out.decode("utf-8") here, which raises AttributeError
        # on 'str'.
        print("stdout:\n\n" + out)
        print("stderr:\n\n" + err)

        if proc.returncode != 0:
            print('Unsuccessful run: "' + ' '.join(cmd) + '"')
            raise Exception("Unsuccessful run of command.")
        return out
    except OSError:
        print('Failed to run: "' + ' '.join(cmd) + '"')
        raise


def get_triple_arch(analyze_command_file):
    """Return the target CPU architecture encoded in the analyzer command.

    Reads the first line of *analyze_command_file* and extracts the
    architecture component of a '--target=<arch>-...' flag. Falls back to
    the host machine's architecture when no such flag is present.
    """
    with open(analyze_command_file, encoding="utf-8", errors="ignore") as f:
        cmd = f.readline()
    for flag in cmd.split():
        if flag.startswith('--target='):
            return flag[9:].split('-')[0]  # 9 == len('--target=')
    return platform.machine()


if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='Prepare all commands '
                    'to execute in local environment for debugging.')
    parser.add_argument(
        '--sources_root',
        default='./sources-root',
        help="Path of the source root.")
    parser.add_argument(
        '--report_dir',
        default='..',
        help="Path of the report dir.")
    parser.add_argument(
        '--clang',
        required=True,
        help="Path to the clang binary.")
    parser.add_argument(
        '--clang_plugin_name', default=None,
        help="Name of the used clang plugin.")
    parser.add_argument(
        '--clang_plugin_path', default=None,
        help="Path to the used clang plugin.")
    args = parser.parse_args()

    # Rewrite the compilation database so every path points into the
    # local sources-root copy.
    compile_cmd_debug = "compile_cmd_DEBUG.json"
    with open(compile_cmd_debug, 'w',
              encoding="utf-8", errors="ignore") as f:
        f.write(
            json.dumps(
                prepare_compile_cmd.prepare(
                    os.path.join(args.report_dir, "compile_cmd.json"),
                    args.sources_root),
                indent=4))

    # Same rewrite for the compiler info file.
    compiler_info_debug = "compiler_info_DEBUG.json"
    with open(compiler_info_debug, 'w',
              encoding="utf-8", errors="ignore") as f:
        f.write(
            json.dumps(
                prepare_compiler_info.prepare(
                    os.path.join(args.report_dir, "compiler_info.json"),
                    args.sources_root),
                indent=4))

    # ctu-collect: gather CTU analysis data into ./report_debug.
    out = execute(["CodeChecker", "analyze", "--ctu-collect",
                   compile_cmd_debug,
                   "--compiler-info-file", compiler_info_debug,
                   "-o", "report_debug",
                   "--verbose", "debug"])

    # Rewrite the analyzer command so it can be replayed locally.
    analyzer_command_debug = "analyzer-command_DEBUG"
    target = get_triple_arch('./analyzer-command')
    with open(analyzer_command_debug, 'w',
              encoding="utf-8", errors="ignore") as f:
        f.write(
            prepare_analyzer_cmd.prepare(
                "./analyzer-command",
                prepare_analyzer_cmd.PathOptions(
                    args.sources_root,
                    args.clang,
                    args.clang_plugin_name,
                    args.clang_plugin_path,
                    "./report_debug/ctu-dir/" + target)))

    print(
        "Preparation of files for debugging is done. "
        "Now you can execute the generated analyzer command. "
        "E.g. $ bash %s" % analyzer_command_debug)
0.283385
0.062217
from .QPro import Gate
from .baseClasses import setAttr
from ..QuantumToolbox import evolution
from ..QuantumToolbox import operators #pylint: disable=relative-beyond-top-level
from ..QuantumToolbox import spinRotations #pylint: disable=relative-beyond-top-level


class SpinRotation(Gate): # pylint: disable=too-many-ancestors
    """Gate rotating every sub-system (spin) by ``angle`` around ``rotationAxis``.

    The rotation generator is the corresponding angular-momentum operator
    (``Jx``/``Jy``/``Jz`` from the QuantumToolbox); a per-sub-system unitary
    is built and the results are composed into the full gate matrix.
    """

    label = 'SpinRotation'
    #: (**class attribute**) number of instances created internally by the library
    _internalInstances: int = 0
    #: (**class attribute**) number of instances created explicitly by the user
    _externalInstances: int = 0
    #: (**class attribute**) number of total instances = _internalInstances + _externalInstances
    _instances: int = 0

    __slots__ = ['__angle', '__rotationAxis', 'phase', '_rotationOp']

    def __init__(self, **kwargs):
        super().__init__()
        # rotation angle, set through the ``angle`` property
        self.__angle = None
        # axis label 'x'|'y'|'z', set through the ``rotationAxis`` property
        self.__rotationAxis = None
        # angular-momentum operator chosen by the rotationAxis setter
        self._rotationOp = None
        # multiplicative phase applied to the rotation generator
        self.phase = 1
        #self._createUnitary = self._rotMat
        self._named__setKwargs(**kwargs) # pylint: disable=no-member

    @property
    def angle(self):
        """Rotation angle (presumably radians -- TODO confirm against QuantumToolbox)."""
        return self._SpinRotation__angle

    @angle.setter
    def angle(self, val):
        # setAttr comes from baseClasses; presumably it also marks the
        # object parameter-updated so cached matrices get rebuilt -- verify.
        setAttr(self, '_SpinRotation__angle', val)

    @property
    def rotationAxis(self):
        """Rotation axis label: 'x', 'y' or 'z'."""
        return self._SpinRotation__rotationAxis # pylint: disable=no-member

    @rotationAxis.setter
    def rotationAxis(self, axStr):
        setAttr(self, '_SpinRotation__rotationAxis', axStr)
        # select the matching angular-momentum operator for the axis
        if axStr.lower() == 'x':
            self._rotationOp = operators.Jx
        elif axStr.lower() == 'y':
            self._rotationOp = operators.Jy
        elif axStr.lower() == 'z':
            self._rotationOp = operators.Jz
        else:
            raise ValueError('unknown axis')

    def _rotMat(self, collapseOps = None, decayRates = None, openSys=False): #pylint:disable=unused-argument
        """Build (and cache) the rotation unitary for all sub-systems.

        The matrix is recomputed only when no cached matrix exists or a
        parameter changed. For open-system evolution (``openSys`` True, a
        list of ``collapseOps``, or ``self._isOpen``) the unitary is
        wrapped into a superoperator via ``evolution._prepostSO``.
        """
        if ((self._paramBoundBase__matrix is None) or (self._paramBoundBase__paramUpdated is True)): # pylint: disable=no-member
            sys = list(self.subSys.values())
            rotOp = self._rotationOp
            # unitary generator for the first sub-system, embedded into the composite space
            flipOp = operators.compositeOp(rotOp(sys[0].dimension, isDim=True), sys[0]._dimsBefore, sys[0]._dimsAfter) # pylint: disable=no-member,line-too-long # noqa: E501
            flipUn = evolution.Unitary(self.phase*self.angle*flipOp)
            # compose the per-sub-system unitaries for the remaining spins
            for i in range(len(sys)-1):
                flipOpN = operators.compositeOp(rotOp(sys[i+1].dimension, isDim=True), sys[i+1]._dimsBefore, sys[i+1]._dimsAfter)
                flipUn = evolution.Unitary(self.phase*self.angle*flipOpN) @ flipUn
            self._paramBoundBase__matrix = evolution._prepostSO(flipUn) if (openSys or isinstance(collapseOps, list) or self._isOpen) else flipUn # pylint: disable=assigning-non-slot,line-too-long,protected-access
        self._paramBoundBase__paramUpdated = False # pylint: disable=assigning-non-slot
        return self._paramBoundBase__matrix # pylint: disable=no-member


class xGate(SpinRotation): # pylint: disable=too-many-ancestors
    """Spin rotation around the x axis, with an optional 'instant flip' implementation."""

    label = 'xGate'
    #: (**class attribute**) number of instances created internally by the library
    _internalInstances: int = 0
    #: (**class attribute**) number of instances created explicitly by the user
    _externalInstances: int = 0
    #: (**class attribute**) number of total instances = _internalInstances + _externalInstances
    _instances: int = 0

    __slots__ = []

    def __init__(self, **kwargs):
        super().__init__()
        self.rotationAxis = 'x'
        #self._createUnitary = self._gateImplements
        self._named__setKwargs(**kwargs) # pylint: disable=no-member

    def instantFlip(self, openSys=False):
        """Build (and cache) the gate matrix from the spinRotations helpers.

        Unlike :meth:`SpinRotation._rotMat`, this uses the pre-built
        rotation matrices (``xRotation``/``yRotation``/``zRotation``)
        instead of an angular-momentum generator.
        """
        if ((self._paramBoundBase__matrix is None) or (self._paramBoundBase__paramUpdated is True)): # pylint: disable=no-member
            sys = list(self.subSys.values())
            if self.rotationAxis.lower() == 'x':
                rotOp = spinRotations.xRotation
            elif self.rotationAxis.lower() == 'y':
                rotOp = spinRotations.yRotation
            elif self.rotationAxis.lower() == 'z':
                rotOp = spinRotations.zRotation
            else:
                # BUGFIX: previously an unknown axis left rotOp unbound and
                # raised NameError below; fail explicitly instead, matching
                # the rotationAxis setter's error.
                raise ValueError('unknown axis')
            flipOp = operators.compositeOp(rotOp(self.angle), sys[0]._dimsBefore, sys[0]._dimsAfter) # pylint: disable=no-member
            for i in range(len(sys)-1):
                flipOp = operators.compositeOp(rotOp(self.angle), sys[i+1]._dimsBefore, sys[i+1]._dimsAfter) @ flipOp
            self._paramBoundBase__matrix = evolution._prepostSO(flipOp) if openSys else flipOp # pylint: disable=assigning-non-slot,protected-access
        self._paramBoundBase__paramUpdated = False # pylint: disable=assigning-non-slot
        return self._paramBoundBase__matrix # pylint: disable=no-member

    def _gateImplements(self, collapseOps = None, decayRates = None): #pylint:disable=unused-argument
        """Dispatch to the implementation selected by ``self.implementation``."""
        if self.implementation is None:
            unitary = self._rotMat(openSys = isinstance(collapseOps, list) or self._isOpen)
        elif self.implementation.lower() in ('instant', 'flip'): # pylint: disable=no-member
            unitary = self.instantFlip(openSys = isinstance(collapseOps, list) or self._isOpen)
        else:
            # BUGFIX: previously an unrecognised implementation string left
            # 'unitary' unbound and raised UnboundLocalError at the return.
            raise ValueError('unknown implementation')
        return unitary


# assigned after the class bodies so the methods above are bound as the
# unitary factories used by the Gate machinery
SpinRotation._createUnitary = SpinRotation._rotMat # pylint: disable=protected-access
xGate._createUnitary = xGate._gateImplements
src/quanguru/classes/QGates.py
from .QPro import Gate
from .baseClasses import setAttr
from ..QuantumToolbox import evolution
from ..QuantumToolbox import operators #pylint: disable=relative-beyond-top-level
from ..QuantumToolbox import spinRotations #pylint: disable=relative-beyond-top-level


class SpinRotation(Gate): # pylint: disable=too-many-ancestors
    """Gate rotating every sub-system (spin) by ``angle`` around ``rotationAxis``.

    The rotation generator is the corresponding angular-momentum operator
    (``Jx``/``Jy``/``Jz`` from the QuantumToolbox); a per-sub-system unitary
    is built and the results are composed into the full gate matrix.
    """

    label = 'SpinRotation'
    #: (**class attribute**) number of instances created internally by the library
    _internalInstances: int = 0
    #: (**class attribute**) number of instances created explicitly by the user
    _externalInstances: int = 0
    #: (**class attribute**) number of total instances = _internalInstances + _externalInstances
    _instances: int = 0

    __slots__ = ['__angle', '__rotationAxis', 'phase', '_rotationOp']

    def __init__(self, **kwargs):
        super().__init__()
        # rotation angle, set through the ``angle`` property
        self.__angle = None
        # axis label 'x'|'y'|'z', set through the ``rotationAxis`` property
        self.__rotationAxis = None
        # angular-momentum operator chosen by the rotationAxis setter
        self._rotationOp = None
        # multiplicative phase applied to the rotation generator
        self.phase = 1
        #self._createUnitary = self._rotMat
        self._named__setKwargs(**kwargs) # pylint: disable=no-member

    @property
    def angle(self):
        """Rotation angle (presumably radians -- TODO confirm against QuantumToolbox)."""
        return self._SpinRotation__angle

    @angle.setter
    def angle(self, val):
        # setAttr comes from baseClasses; presumably it also marks the
        # object parameter-updated so cached matrices get rebuilt -- verify.
        setAttr(self, '_SpinRotation__angle', val)

    @property
    def rotationAxis(self):
        """Rotation axis label: 'x', 'y' or 'z'."""
        return self._SpinRotation__rotationAxis # pylint: disable=no-member

    @rotationAxis.setter
    def rotationAxis(self, axStr):
        setAttr(self, '_SpinRotation__rotationAxis', axStr)
        # select the matching angular-momentum operator for the axis
        if axStr.lower() == 'x':
            self._rotationOp = operators.Jx
        elif axStr.lower() == 'y':
            self._rotationOp = operators.Jy
        elif axStr.lower() == 'z':
            self._rotationOp = operators.Jz
        else:
            raise ValueError('unknown axis')

    def _rotMat(self, collapseOps = None, decayRates = None, openSys=False): #pylint:disable=unused-argument
        """Build (and cache) the rotation unitary for all sub-systems.

        The matrix is recomputed only when no cached matrix exists or a
        parameter changed. For open-system evolution (``openSys`` True, a
        list of ``collapseOps``, or ``self._isOpen``) the unitary is
        wrapped into a superoperator via ``evolution._prepostSO``.
        """
        if ((self._paramBoundBase__matrix is None) or (self._paramBoundBase__paramUpdated is True)): # pylint: disable=no-member
            sys = list(self.subSys.values())
            rotOp = self._rotationOp
            # unitary generator for the first sub-system, embedded into the composite space
            flipOp = operators.compositeOp(rotOp(sys[0].dimension, isDim=True), sys[0]._dimsBefore, sys[0]._dimsAfter) # pylint: disable=no-member,line-too-long # noqa: E501
            flipUn = evolution.Unitary(self.phase*self.angle*flipOp)
            # compose the per-sub-system unitaries for the remaining spins
            for i in range(len(sys)-1):
                flipOpN = operators.compositeOp(rotOp(sys[i+1].dimension, isDim=True), sys[i+1]._dimsBefore, sys[i+1]._dimsAfter)
                flipUn = evolution.Unitary(self.phase*self.angle*flipOpN) @ flipUn
            self._paramBoundBase__matrix = evolution._prepostSO(flipUn) if (openSys or isinstance(collapseOps, list) or self._isOpen) else flipUn # pylint: disable=assigning-non-slot,line-too-long,protected-access
        self._paramBoundBase__paramUpdated = False # pylint: disable=assigning-non-slot
        return self._paramBoundBase__matrix # pylint: disable=no-member


class xGate(SpinRotation): # pylint: disable=too-many-ancestors
    """Spin rotation around the x axis, with an optional 'instant flip' implementation."""

    label = 'xGate'
    #: (**class attribute**) number of instances created internally by the library
    _internalInstances: int = 0
    #: (**class attribute**) number of instances created explicitly by the user
    _externalInstances: int = 0
    #: (**class attribute**) number of total instances = _internalInstances + _externalInstances
    _instances: int = 0

    __slots__ = []

    def __init__(self, **kwargs):
        super().__init__()
        self.rotationAxis = 'x'
        #self._createUnitary = self._gateImplements
        self._named__setKwargs(**kwargs) # pylint: disable=no-member

    def instantFlip(self, openSys=False):
        """Build (and cache) the gate matrix from the spinRotations helpers.

        Unlike :meth:`SpinRotation._rotMat`, this uses the pre-built
        rotation matrices (``xRotation``/``yRotation``/``zRotation``)
        instead of an angular-momentum generator.
        """
        if ((self._paramBoundBase__matrix is None) or (self._paramBoundBase__paramUpdated is True)): # pylint: disable=no-member
            sys = list(self.subSys.values())
            if self.rotationAxis.lower() == 'x':
                rotOp = spinRotations.xRotation
            elif self.rotationAxis.lower() == 'y':
                rotOp = spinRotations.yRotation
            elif self.rotationAxis.lower() == 'z':
                rotOp = spinRotations.zRotation
            else:
                # BUGFIX: previously an unknown axis left rotOp unbound and
                # raised NameError below; fail explicitly instead, matching
                # the rotationAxis setter's error.
                raise ValueError('unknown axis')
            flipOp = operators.compositeOp(rotOp(self.angle), sys[0]._dimsBefore, sys[0]._dimsAfter) # pylint: disable=no-member
            for i in range(len(sys)-1):
                flipOp = operators.compositeOp(rotOp(self.angle), sys[i+1]._dimsBefore, sys[i+1]._dimsAfter) @ flipOp
            self._paramBoundBase__matrix = evolution._prepostSO(flipOp) if openSys else flipOp # pylint: disable=assigning-non-slot,protected-access
        self._paramBoundBase__paramUpdated = False # pylint: disable=assigning-non-slot
        return self._paramBoundBase__matrix # pylint: disable=no-member

    def _gateImplements(self, collapseOps = None, decayRates = None): #pylint:disable=unused-argument
        """Dispatch to the implementation selected by ``self.implementation``."""
        if self.implementation is None:
            unitary = self._rotMat(openSys = isinstance(collapseOps, list) or self._isOpen)
        elif self.implementation.lower() in ('instant', 'flip'): # pylint: disable=no-member
            unitary = self.instantFlip(openSys = isinstance(collapseOps, list) or self._isOpen)
        else:
            # BUGFIX: previously an unrecognised implementation string left
            # 'unitary' unbound and raised UnboundLocalError at the return.
            raise ValueError('unknown implementation')
        return unitary


# assigned after the class bodies so the methods above are bound as the
# unitary factories used by the Gate machinery
SpinRotation._createUnitary = SpinRotation._rotMat # pylint: disable=protected-access
xGate._createUnitary = xGate._gateImplements
0.705176
0.15633
import asyncio
import contextlib
import glob
import json
import logging
import multiprocessing
import time

from django.apps import AppConfig
from django.conf import settings
from hfc.fabric import Client
from hfc.fabric.peer import Peer
from hfc.fabric.user import create_user
from hfc.util.keyvaluestore import FileKeyValueStore
from celery.result import AsyncResult

from substrapp.tasks.tasks import prepare_tuple, on_compute_plan
from substrapp.utils import get_owner
from substrapp.ledger.connection import get_hfc, ledger_grpc_options

logger = logging.getLogger(__name__)


@contextlib.contextmanager
def get_event_loop():
    """Yield a fresh asyncio event loop and close it on exit."""
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    try:
        yield loop
    finally:
        loop.close()


def tuple_get_worker(event_type, asset):
    """Return the worker responsible for *asset* given its event type."""
    if event_type == 'aggregatetuple':
        return asset['worker']
    return asset['dataset']['worker']


def on_tuples_event(channel_name, block_number, tx_id, tx_status, event_type, asset):
    """Queue a compute task for a tuple event owned by this organisation.

    Skips the event when the transaction is invalid, the tuple is not in
    'todo' status, it belongs to another worker, or a task with the same
    key is already registered.
    """
    owner = get_owner()
    worker_queue = f"{settings.ORG_NAME}.worker"

    key = asset['key']
    status = asset['status']

    if tx_status != 'VALID':
        logger.error(
            f'Failed transaction on task {key}: type={event_type}'
            f' status={status} with tx status: {tx_status}')
        return

    logger.info(f'Processing task {key}: type={event_type} status={status}')

    if status != 'todo':
        return

    if event_type is None:
        return

    tuple_owner = tuple_get_worker(event_type, asset)
    if tuple_owner != owner:
        logger.info(f'Skipping task {key}: owner does not match'
                    f' ({tuple_owner} vs {owner})')
        return

    if AsyncResult(key).state != 'PENDING':
        logger.info(f'Skipping task {key}: already exists')
        return

    prepare_tuple.apply_async(
        (channel_name, asset, event_type),
        task_id=key,
        queue=worker_queue
    )


def on_compute_plan_event(channel_name, block_number, tx_id, tx_status, asset):
    """Queue a cleaning task for a compute-plan event.

    The task id includes the transaction id so the same compute plan can
    be cleaned once per transaction.
    """
    worker_queue = f"{settings.ORG_NAME}.worker"
    key = asset['compute_plan_key']

    # Currently, we received this event on done, failed and canceled status
    # We apply the same behavior for those three status.
    # In the future, we can apply a conditional strategy based on the status.
    status = asset['status']

    if tx_status != 'VALID':
        logger.error(
            f'Failed transaction on cleaning task {key}: type=computePlan'
            f' status={status} with tx status: {tx_status}')
        return

    logger.info(f'Processing cleaning task {key}: type=computePlan status={status}')

    task_id = f'{key}_{tx_id}'

    if AsyncResult(task_id).state != 'PENDING':
        # BUGFIX: 'block_numer' typo corrected in the log message.
        logger.info(f'Skipping cleaning task: already exists. '
                    f'Info: compute_plan={key}, block_number={block_number}, tx_id={tx_id}')
        return

    on_compute_plan.apply_async(
        (channel_name, asset, ),
        task_id=task_id,
        queue=worker_queue
    )


def on_event(channel_name, cc_event, block_number, tx_id, tx_status):
    """Dispatch each asset in a chaincode event payload to its handler."""
    payload = json.loads(cc_event['payload'])
    for event_type, assets in payload.items():
        if not assets:
            continue
        for asset in assets:
            if event_type == 'compute_plan':
                on_compute_plan_event(channel_name, block_number, tx_id, tx_status, asset)
            else:
                on_tuples_event(channel_name, block_number, tx_id, tx_status, event_type, asset)


def wait(channel_name):
    """Listen forever for chaincode events on *channel_name*.

    Runs in a dedicated subprocess; reconnects to the channel event hub
    after any failure with a 5s backoff.
    """

    def on_channel_event(cc_event, block_number, tx_id, tx_status):
        on_event(channel_name, cc_event, block_number, tx_id, tx_status)

    with get_event_loop() as loop:
        client = Client()
        channel = client.new_channel(channel_name)

        target_peer = Peer(name=settings.LEDGER_PEER_NAME)
        target_peer.init_with_bundle({
            'url': f'{settings.LEDGER_PEER_HOST}:{settings.LEDGER_PEER_PORT}',
            'grpcOptions': ledger_grpc_options(settings.LEDGER_PEER_HOST),
            'tlsCACerts': {'path': settings.LEDGER_PEER_TLS_CA_CERTS},
            'clientKey': {'path': settings.LEDGER_PEER_TLS_CLIENT_KEY},
            'clientCert': {'path': settings.LEDGER_PEER_TLS_CLIENT_CERT},
        })

        try:
            # can fail
            requestor = create_user(
                name=f'{settings.LEDGER_USER_NAME}_events',
                org=settings.ORG_NAME,
                state_store=FileKeyValueStore(settings.LEDGER_CLIENT_STATE_STORE),
                msp_id=settings.LEDGER_MSP_ID,
                key_path=glob.glob(settings.LEDGER_CLIENT_KEY_PATH)[0],
                cert_path=settings.LEDGER_CLIENT_CERT_PATH
            )
        except Exception:
            # Best-effort: the listener subprocess simply exits if the user
            # cannot be created, but the failure is now logged instead of
            # being silently swallowed (previously `except BaseException: pass`).
            logger.exception(f'Could not create requestor user for channel {channel_name};'
                             ' event listener will not start.')
        else:
            # Note:
            # We do a loop to connect to the channel event hub because grpc may disconnect and create an exception
            # Since we're in a django app of backend, an exception here will not crash the server (if the "ready"
            # method has already returned "true").
            # It makes it difficult to reconnect automatically because we need to kill the server
            # to trigger the connexion.
            # So we catch this exception (RPC error) and retry to connect to the event loop.
            while True:
                # use chaincode event
                channel_event_hub = channel.newChannelEventHub(target_peer, requestor)
                try:
                    # We want to replay blocks from the beginning (start=0) if channel event hub was disconnected during
                    # events emission
                    stream = channel_event_hub.connect(start=0, filtered=False)
                    channel_event_hub.registerChaincodeEvent(
                        settings.LEDGER_CHANNELS[channel_name]['chaincode']['name'],
                        'chaincode-updates',
                        onEvent=on_channel_event)
                    logger.info(f'Connect to Channel Event Hub ({channel_name})')
                    loop.run_until_complete(stream)
                except Exception as e:
                    logger.error(f'Channel Event Hub failed for {channel_name} ({type(e)}): {e} re-connecting in 5s')
                    time.sleep(5)


class EventsConfig(AppConfig):
    """Django app config that starts one event-listener subprocess per channel."""

    name = 'events'

    def listen_to_channel(self, channel_name):
        # We try to connect a client first, if it fails the backend will not start.
        # It prevents potential issues when we launch the channel event hub in a subprocess.
        while True:
            try:
                with get_hfc(channel_name) as (loop, client, user):
                    logger.info(f'Events: Connected to channel {channel_name}.')
            except Exception as e:
                logger.exception(e)
                time.sleep(5)
                logger.error(f'Events: Retry connecting to channel {channel_name}.')
            else:
                break

        p1 = multiprocessing.Process(target=wait, args=[channel_name])
        p1.start()

    def ready(self):
        for channel_name in settings.LEDGER_CHANNELS.keys():
            self.listen_to_channel(channel_name)
backend/events/apps.py
import asyncio
import contextlib
import glob
import json
import logging
import multiprocessing
import time

from django.apps import AppConfig
from django.conf import settings
from hfc.fabric import Client
from hfc.fabric.peer import Peer
from hfc.fabric.user import create_user
from hfc.util.keyvaluestore import FileKeyValueStore
from celery.result import AsyncResult

from substrapp.tasks.tasks import prepare_tuple, on_compute_plan
from substrapp.utils import get_owner
from substrapp.ledger.connection import get_hfc, ledger_grpc_options

logger = logging.getLogger(__name__)


@contextlib.contextmanager
def get_event_loop():
    """Yield a fresh asyncio event loop and close it on exit."""
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    try:
        yield loop
    finally:
        loop.close()


def tuple_get_worker(event_type, asset):
    """Return the worker responsible for *asset* given its event type."""
    if event_type == 'aggregatetuple':
        return asset['worker']
    return asset['dataset']['worker']


def on_tuples_event(channel_name, block_number, tx_id, tx_status, event_type, asset):
    """Queue a compute task for a tuple event owned by this organisation.

    Skips the event when the transaction is invalid, the tuple is not in
    'todo' status, it belongs to another worker, or a task with the same
    key is already registered.
    """
    owner = get_owner()
    worker_queue = f"{settings.ORG_NAME}.worker"

    key = asset['key']
    status = asset['status']

    if tx_status != 'VALID':
        logger.error(
            f'Failed transaction on task {key}: type={event_type}'
            f' status={status} with tx status: {tx_status}')
        return

    logger.info(f'Processing task {key}: type={event_type} status={status}')

    if status != 'todo':
        return

    if event_type is None:
        return

    tuple_owner = tuple_get_worker(event_type, asset)
    if tuple_owner != owner:
        logger.info(f'Skipping task {key}: owner does not match'
                    f' ({tuple_owner} vs {owner})')
        return

    if AsyncResult(key).state != 'PENDING':
        logger.info(f'Skipping task {key}: already exists')
        return

    prepare_tuple.apply_async(
        (channel_name, asset, event_type),
        task_id=key,
        queue=worker_queue
    )


def on_compute_plan_event(channel_name, block_number, tx_id, tx_status, asset):
    """Queue a cleaning task for a compute-plan event.

    The task id includes the transaction id so the same compute plan can
    be cleaned once per transaction.
    """
    worker_queue = f"{settings.ORG_NAME}.worker"
    key = asset['compute_plan_key']

    # Currently, we received this event on done, failed and canceled status
    # We apply the same behavior for those three status.
    # In the future, we can apply a conditional strategy based on the status.
    status = asset['status']

    if tx_status != 'VALID':
        logger.error(
            f'Failed transaction on cleaning task {key}: type=computePlan'
            f' status={status} with tx status: {tx_status}')
        return

    logger.info(f'Processing cleaning task {key}: type=computePlan status={status}')

    task_id = f'{key}_{tx_id}'

    if AsyncResult(task_id).state != 'PENDING':
        # BUGFIX: 'block_numer' typo corrected in the log message.
        logger.info(f'Skipping cleaning task: already exists. '
                    f'Info: compute_plan={key}, block_number={block_number}, tx_id={tx_id}')
        return

    on_compute_plan.apply_async(
        (channel_name, asset, ),
        task_id=task_id,
        queue=worker_queue
    )


def on_event(channel_name, cc_event, block_number, tx_id, tx_status):
    """Dispatch each asset in a chaincode event payload to its handler."""
    payload = json.loads(cc_event['payload'])
    for event_type, assets in payload.items():
        if not assets:
            continue
        for asset in assets:
            if event_type == 'compute_plan':
                on_compute_plan_event(channel_name, block_number, tx_id, tx_status, asset)
            else:
                on_tuples_event(channel_name, block_number, tx_id, tx_status, event_type, asset)


def wait(channel_name):
    """Listen forever for chaincode events on *channel_name*.

    Runs in a dedicated subprocess; reconnects to the channel event hub
    after any failure with a 5s backoff.
    """

    def on_channel_event(cc_event, block_number, tx_id, tx_status):
        on_event(channel_name, cc_event, block_number, tx_id, tx_status)

    with get_event_loop() as loop:
        client = Client()
        channel = client.new_channel(channel_name)

        target_peer = Peer(name=settings.LEDGER_PEER_NAME)
        target_peer.init_with_bundle({
            'url': f'{settings.LEDGER_PEER_HOST}:{settings.LEDGER_PEER_PORT}',
            'grpcOptions': ledger_grpc_options(settings.LEDGER_PEER_HOST),
            'tlsCACerts': {'path': settings.LEDGER_PEER_TLS_CA_CERTS},
            'clientKey': {'path': settings.LEDGER_PEER_TLS_CLIENT_KEY},
            'clientCert': {'path': settings.LEDGER_PEER_TLS_CLIENT_CERT},
        })

        try:
            # can fail
            requestor = create_user(
                name=f'{settings.LEDGER_USER_NAME}_events',
                org=settings.ORG_NAME,
                state_store=FileKeyValueStore(settings.LEDGER_CLIENT_STATE_STORE),
                msp_id=settings.LEDGER_MSP_ID,
                key_path=glob.glob(settings.LEDGER_CLIENT_KEY_PATH)[0],
                cert_path=settings.LEDGER_CLIENT_CERT_PATH
            )
        except Exception:
            # Best-effort: the listener subprocess simply exits if the user
            # cannot be created, but the failure is now logged instead of
            # being silently swallowed (previously `except BaseException: pass`).
            logger.exception(f'Could not create requestor user for channel {channel_name};'
                             ' event listener will not start.')
        else:
            # Note:
            # We do a loop to connect to the channel event hub because grpc may disconnect and create an exception
            # Since we're in a django app of backend, an exception here will not crash the server (if the "ready"
            # method has already returned "true").
            # It makes it difficult to reconnect automatically because we need to kill the server
            # to trigger the connexion.
            # So we catch this exception (RPC error) and retry to connect to the event loop.
            while True:
                # use chaincode event
                channel_event_hub = channel.newChannelEventHub(target_peer, requestor)
                try:
                    # We want to replay blocks from the beginning (start=0) if channel event hub was disconnected during
                    # events emission
                    stream = channel_event_hub.connect(start=0, filtered=False)
                    channel_event_hub.registerChaincodeEvent(
                        settings.LEDGER_CHANNELS[channel_name]['chaincode']['name'],
                        'chaincode-updates',
                        onEvent=on_channel_event)
                    logger.info(f'Connect to Channel Event Hub ({channel_name})')
                    loop.run_until_complete(stream)
                except Exception as e:
                    logger.error(f'Channel Event Hub failed for {channel_name} ({type(e)}): {e} re-connecting in 5s')
                    time.sleep(5)


class EventsConfig(AppConfig):
    """Django app config that starts one event-listener subprocess per channel."""

    name = 'events'

    def listen_to_channel(self, channel_name):
        # We try to connect a client first, if it fails the backend will not start.
        # It prevents potential issues when we launch the channel event hub in a subprocess.
        while True:
            try:
                with get_hfc(channel_name) as (loop, client, user):
                    logger.info(f'Events: Connected to channel {channel_name}.')
            except Exception as e:
                logger.exception(e)
                time.sleep(5)
                logger.error(f'Events: Retry connecting to channel {channel_name}.')
            else:
                break

        p1 = multiprocessing.Process(target=wait, args=[channel_name])
        p1.start()

    def ready(self):
        for channel_name in settings.LEDGER_CHANNELS.keys():
            self.listen_to_channel(channel_name)
0.452294
0.069732
from __future__ import unicode_literals from snapshottest import Snapshot snapshots = Snapshot() snapshots['test_create_execution_plan_with_dep 1'] = '''{ "__class__": "ExecutionPlanSnapshot", "artifacts_persisted": true, "executor_name": "in_process", "initial_known_state": null, "pipeline_snapshot_id": "330e31c23c3edacaa7a9224039b53a703f011788", "snapshot_version": 1, "step_keys_to_execute": [ "solid_one", "solid_two" ], "steps": [ { "__class__": "ExecutionStepSnap", "inputs": [], "key": "solid_one", "kind": { "__enum__": "StepKind.COMPUTE" }, "metadata_items": [], "outputs": [ { "__class__": "ExecutionStepOutputSnap", "dagster_type_key": "Any", "name": "result", "properties": { "__class__": "StepOutputProperties", "asset_key": null, "is_asset": false, "is_dynamic": false, "is_required": true, "should_materialize": false }, "solid_handle": { "__class__": "SolidHandle", "name": "solid_one", "parent": null } } ], "solid_handle_id": "solid_one", "step_handle": { "__class__": "StepHandle", "key": "solid_one", "solid_handle": { "__class__": "SolidHandle", "name": "solid_one", "parent": null } }, "tags": {} }, { "__class__": "ExecutionStepSnap", "inputs": [ { "__class__": "ExecutionStepInputSnap", "dagster_type_key": "Any", "name": "num", "source": { "__class__": "FromStepOutput", "fan_in": false, "input_name": "num", "solid_handle": { "__class__": "SolidHandle", "name": "solid_two", "parent": null }, "step_output_handle": { "__class__": "StepOutputHandle", "mapping_key": null, "output_name": "result", "step_key": "solid_one" } }, "upstream_output_handles": [ { "__class__": "StepOutputHandle", "mapping_key": null, "output_name": "result", "step_key": "solid_one" } ] } ], "key": "solid_two", "kind": { "__enum__": "StepKind.COMPUTE" }, "metadata_items": [], "outputs": [ { "__class__": "ExecutionStepOutputSnap", "dagster_type_key": "Any", "name": "result", "properties": { "__class__": "StepOutputProperties", "asset_key": null, "is_asset": false, "is_dynamic": false, 
"is_required": true, "should_materialize": false }, "solid_handle": { "__class__": "SolidHandle", "name": "solid_two", "parent": null } } ], "solid_handle_id": "solid_two", "step_handle": { "__class__": "StepHandle", "key": "solid_two", "solid_handle": { "__class__": "SolidHandle", "name": "solid_two", "parent": null } }, "tags": {} } ] }''' snapshots['test_create_noop_execution_plan 1'] = '''{ "__class__": "ExecutionPlanSnapshot", "artifacts_persisted": true, "executor_name": "in_process", "initial_known_state": null, "pipeline_snapshot_id": "7ffd65ba8633d4c172a7b15dfee5927bed301724", "snapshot_version": 1, "step_keys_to_execute": [ "noop_solid" ], "steps": [ { "__class__": "ExecutionStepSnap", "inputs": [], "key": "noop_solid", "kind": { "__enum__": "StepKind.COMPUTE" }, "metadata_items": [], "outputs": [ { "__class__": "ExecutionStepOutputSnap", "dagster_type_key": "Any", "name": "result", "properties": { "__class__": "StepOutputProperties", "asset_key": null, "is_asset": false, "is_dynamic": false, "is_required": true, "should_materialize": false }, "solid_handle": { "__class__": "SolidHandle", "name": "noop_solid", "parent": null } } ], "solid_handle_id": "noop_solid", "step_handle": { "__class__": "StepHandle", "key": "noop_solid", "solid_handle": { "__class__": "SolidHandle", "name": "noop_solid", "parent": null } }, "tags": {} } ] }''' snapshots['test_create_noop_execution_plan_with_tags 1'] = '''{ "__class__": "ExecutionPlanSnapshot", "artifacts_persisted": true, "executor_name": "in_process", "initial_known_state": null, "pipeline_snapshot_id": "b96bfd4d61336a6ed2016679d1467c3e1daa3285", "snapshot_version": 1, "step_keys_to_execute": [ "noop_solid" ], "steps": [ { "__class__": "ExecutionStepSnap", "inputs": [], "key": "noop_solid", "kind": { "__enum__": "StepKind.COMPUTE" }, "metadata_items": [ { "__class__": "ExecutionPlanMetadataItemSnap", "key": "bar", "value": "baaz" }, { "__class__": "ExecutionPlanMetadataItemSnap", "key": "foo", "value": "bar" } ], 
"outputs": [ { "__class__": "ExecutionStepOutputSnap", "dagster_type_key": "Any", "name": "result", "properties": { "__class__": "StepOutputProperties", "asset_key": null, "is_asset": false, "is_dynamic": false, "is_required": true, "should_materialize": false }, "solid_handle": { "__class__": "SolidHandle", "name": "noop_solid", "parent": null } } ], "solid_handle_id": "noop_solid", "step_handle": { "__class__": "StepHandle", "key": "noop_solid", "solid_handle": { "__class__": "SolidHandle", "name": "noop_solid", "parent": null } }, "tags": { "bar": "baaz", "foo": "bar" } } ] }''' snapshots['test_create_with_composite 1'] = '''{ "__class__": "ExecutionPlanSnapshot", "artifacts_persisted": true, "executor_name": "in_process", "initial_known_state": null, "pipeline_snapshot_id": "7bb46b4373672e250386288663f7eca81f0a0a02", "snapshot_version": 1, "step_keys_to_execute": [ "comp_1.return_one", "comp_1.add_one", "comp_2.return_one", "comp_2.add_one", "add" ], "steps": [ { "__class__": "ExecutionStepSnap", "inputs": [ { "__class__": "ExecutionStepInputSnap", "dagster_type_key": "Any", "name": "num_one", "source": { "__class__": "FromStepOutput", "fan_in": false, "input_name": "num_one", "solid_handle": { "__class__": "SolidHandle", "name": "add", "parent": null }, "step_output_handle": { "__class__": "StepOutputHandle", "mapping_key": null, "output_name": "result", "step_key": "comp_1.add_one" } }, "upstream_output_handles": [ { "__class__": "StepOutputHandle", "mapping_key": null, "output_name": "result", "step_key": "comp_1.add_one" } ] }, { "__class__": "ExecutionStepInputSnap", "dagster_type_key": "Any", "name": "num_two", "source": { "__class__": "FromStepOutput", "fan_in": false, "input_name": "num_two", "solid_handle": { "__class__": "SolidHandle", "name": "add", "parent": null }, "step_output_handle": { "__class__": "StepOutputHandle", "mapping_key": null, "output_name": "result", "step_key": "comp_2.add_one" } }, "upstream_output_handles": [ { "__class__": 
"StepOutputHandle", "mapping_key": null, "output_name": "result", "step_key": "comp_2.add_one" } ] } ], "key": "add", "kind": { "__enum__": "StepKind.COMPUTE" }, "metadata_items": [], "outputs": [ { "__class__": "ExecutionStepOutputSnap", "dagster_type_key": "Any", "name": "result", "properties": { "__class__": "StepOutputProperties", "asset_key": null, "is_asset": false, "is_dynamic": false, "is_required": true, "should_materialize": false }, "solid_handle": { "__class__": "SolidHandle", "name": "add", "parent": null } } ], "solid_handle_id": "add", "step_handle": { "__class__": "StepHandle", "key": "add", "solid_handle": { "__class__": "SolidHandle", "name": "add", "parent": null } }, "tags": {} }, { "__class__": "ExecutionStepSnap", "inputs": [ { "__class__": "ExecutionStepInputSnap", "dagster_type_key": "Int", "name": "num", "source": { "__class__": "FromStepOutput", "fan_in": false, "input_name": "num", "solid_handle": { "__class__": "SolidHandle", "name": "add_one", "parent": { "__class__": "SolidHandle", "name": "comp_1", "parent": null } }, "step_output_handle": { "__class__": "StepOutputHandle", "mapping_key": null, "output_name": "out_num", "step_key": "comp_1.return_one" } }, "upstream_output_handles": [ { "__class__": "StepOutputHandle", "mapping_key": null, "output_name": "out_num", "step_key": "comp_1.return_one" } ] } ], "key": "comp_1.add_one", "kind": { "__enum__": "StepKind.COMPUTE" }, "metadata_items": [], "outputs": [ { "__class__": "ExecutionStepOutputSnap", "dagster_type_key": "Int", "name": "result", "properties": { "__class__": "StepOutputProperties", "asset_key": null, "is_asset": false, "is_dynamic": false, "is_required": true, "should_materialize": false }, "solid_handle": { "__class__": "SolidHandle", "name": "add_one", "parent": { "__class__": "SolidHandle", "name": "comp_1", "parent": null } } } ], "solid_handle_id": "comp_1.add_one", "step_handle": { "__class__": "StepHandle", "key": "comp_1.add_one", "solid_handle": { "__class__": 
"SolidHandle", "name": "add_one", "parent": { "__class__": "SolidHandle", "name": "comp_1", "parent": null } } }, "tags": {} }, { "__class__": "ExecutionStepSnap", "inputs": [], "key": "comp_1.return_one", "kind": { "__enum__": "StepKind.COMPUTE" }, "metadata_items": [], "outputs": [ { "__class__": "ExecutionStepOutputSnap", "dagster_type_key": "Int", "name": "out_num", "properties": { "__class__": "StepOutputProperties", "asset_key": null, "is_asset": false, "is_dynamic": false, "is_required": true, "should_materialize": false }, "solid_handle": { "__class__": "SolidHandle", "name": "return_one", "parent": { "__class__": "SolidHandle", "name": "comp_1", "parent": null } } } ], "solid_handle_id": "comp_1.return_one", "step_handle": { "__class__": "StepHandle", "key": "comp_1.return_one", "solid_handle": { "__class__": "SolidHandle", "name": "return_one", "parent": { "__class__": "SolidHandle", "name": "comp_1", "parent": null } } }, "tags": {} }, { "__class__": "ExecutionStepSnap", "inputs": [ { "__class__": "ExecutionStepInputSnap", "dagster_type_key": "Int", "name": "num", "source": { "__class__": "FromStepOutput", "fan_in": false, "input_name": "num", "solid_handle": { "__class__": "SolidHandle", "name": "add_one", "parent": { "__class__": "SolidHandle", "name": "comp_2", "parent": null } }, "step_output_handle": { "__class__": "StepOutputHandle", "mapping_key": null, "output_name": "out_num", "step_key": "comp_2.return_one" } }, "upstream_output_handles": [ { "__class__": "StepOutputHandle", "mapping_key": null, "output_name": "out_num", "step_key": "comp_2.return_one" } ] } ], "key": "comp_2.add_one", "kind": { "__enum__": "StepKind.COMPUTE" }, "metadata_items": [], "outputs": [ { "__class__": "ExecutionStepOutputSnap", "dagster_type_key": "Int", "name": "result", "properties": { "__class__": "StepOutputProperties", "asset_key": null, "is_asset": false, "is_dynamic": false, "is_required": true, "should_materialize": false }, "solid_handle": { "__class__": 
"SolidHandle", "name": "add_one", "parent": { "__class__": "SolidHandle", "name": "comp_2", "parent": null } } } ], "solid_handle_id": "comp_2.add_one", "step_handle": { "__class__": "StepHandle", "key": "comp_2.add_one", "solid_handle": { "__class__": "SolidHandle", "name": "add_one", "parent": { "__class__": "SolidHandle", "name": "comp_2", "parent": null } } }, "tags": {} }, { "__class__": "ExecutionStepSnap", "inputs": [], "key": "comp_2.return_one", "kind": { "__enum__": "StepKind.COMPUTE" }, "metadata_items": [], "outputs": [ { "__class__": "ExecutionStepOutputSnap", "dagster_type_key": "Int", "name": "out_num", "properties": { "__class__": "StepOutputProperties", "asset_key": null, "is_asset": false, "is_dynamic": false, "is_required": true, "should_materialize": false }, "solid_handle": { "__class__": "SolidHandle", "name": "return_one", "parent": { "__class__": "SolidHandle", "name": "comp_2", "parent": null } } } ], "solid_handle_id": "comp_2.return_one", "step_handle": { "__class__": "StepHandle", "key": "comp_2.return_one", "solid_handle": { "__class__": "SolidHandle", "name": "return_one", "parent": { "__class__": "SolidHandle", "name": "comp_2", "parent": null } } }, "tags": {} } ] }'''
python_modules/dagster/dagster_tests/core_tests/snap_tests/snapshots/snap_test_execution_plan.py
from __future__ import unicode_literals from snapshottest import Snapshot snapshots = Snapshot() snapshots['test_create_execution_plan_with_dep 1'] = '''{ "__class__": "ExecutionPlanSnapshot", "artifacts_persisted": true, "executor_name": "in_process", "initial_known_state": null, "pipeline_snapshot_id": "330e31c23c3edacaa7a9224039b53a703f011788", "snapshot_version": 1, "step_keys_to_execute": [ "solid_one", "solid_two" ], "steps": [ { "__class__": "ExecutionStepSnap", "inputs": [], "key": "solid_one", "kind": { "__enum__": "StepKind.COMPUTE" }, "metadata_items": [], "outputs": [ { "__class__": "ExecutionStepOutputSnap", "dagster_type_key": "Any", "name": "result", "properties": { "__class__": "StepOutputProperties", "asset_key": null, "is_asset": false, "is_dynamic": false, "is_required": true, "should_materialize": false }, "solid_handle": { "__class__": "SolidHandle", "name": "solid_one", "parent": null } } ], "solid_handle_id": "solid_one", "step_handle": { "__class__": "StepHandle", "key": "solid_one", "solid_handle": { "__class__": "SolidHandle", "name": "solid_one", "parent": null } }, "tags": {} }, { "__class__": "ExecutionStepSnap", "inputs": [ { "__class__": "ExecutionStepInputSnap", "dagster_type_key": "Any", "name": "num", "source": { "__class__": "FromStepOutput", "fan_in": false, "input_name": "num", "solid_handle": { "__class__": "SolidHandle", "name": "solid_two", "parent": null }, "step_output_handle": { "__class__": "StepOutputHandle", "mapping_key": null, "output_name": "result", "step_key": "solid_one" } }, "upstream_output_handles": [ { "__class__": "StepOutputHandle", "mapping_key": null, "output_name": "result", "step_key": "solid_one" } ] } ], "key": "solid_two", "kind": { "__enum__": "StepKind.COMPUTE" }, "metadata_items": [], "outputs": [ { "__class__": "ExecutionStepOutputSnap", "dagster_type_key": "Any", "name": "result", "properties": { "__class__": "StepOutputProperties", "asset_key": null, "is_asset": false, "is_dynamic": false, 
"is_required": true, "should_materialize": false }, "solid_handle": { "__class__": "SolidHandle", "name": "solid_two", "parent": null } } ], "solid_handle_id": "solid_two", "step_handle": { "__class__": "StepHandle", "key": "solid_two", "solid_handle": { "__class__": "SolidHandle", "name": "solid_two", "parent": null } }, "tags": {} } ] }''' snapshots['test_create_noop_execution_plan 1'] = '''{ "__class__": "ExecutionPlanSnapshot", "artifacts_persisted": true, "executor_name": "in_process", "initial_known_state": null, "pipeline_snapshot_id": "7ffd65ba8633d4c172a7b15dfee5927bed301724", "snapshot_version": 1, "step_keys_to_execute": [ "noop_solid" ], "steps": [ { "__class__": "ExecutionStepSnap", "inputs": [], "key": "noop_solid", "kind": { "__enum__": "StepKind.COMPUTE" }, "metadata_items": [], "outputs": [ { "__class__": "ExecutionStepOutputSnap", "dagster_type_key": "Any", "name": "result", "properties": { "__class__": "StepOutputProperties", "asset_key": null, "is_asset": false, "is_dynamic": false, "is_required": true, "should_materialize": false }, "solid_handle": { "__class__": "SolidHandle", "name": "noop_solid", "parent": null } } ], "solid_handle_id": "noop_solid", "step_handle": { "__class__": "StepHandle", "key": "noop_solid", "solid_handle": { "__class__": "SolidHandle", "name": "noop_solid", "parent": null } }, "tags": {} } ] }''' snapshots['test_create_noop_execution_plan_with_tags 1'] = '''{ "__class__": "ExecutionPlanSnapshot", "artifacts_persisted": true, "executor_name": "in_process", "initial_known_state": null, "pipeline_snapshot_id": "b96bfd4d61336a6ed2016679d1467c3e1daa3285", "snapshot_version": 1, "step_keys_to_execute": [ "noop_solid" ], "steps": [ { "__class__": "ExecutionStepSnap", "inputs": [], "key": "noop_solid", "kind": { "__enum__": "StepKind.COMPUTE" }, "metadata_items": [ { "__class__": "ExecutionPlanMetadataItemSnap", "key": "bar", "value": "baaz" }, { "__class__": "ExecutionPlanMetadataItemSnap", "key": "foo", "value": "bar" } ], 
"outputs": [ { "__class__": "ExecutionStepOutputSnap", "dagster_type_key": "Any", "name": "result", "properties": { "__class__": "StepOutputProperties", "asset_key": null, "is_asset": false, "is_dynamic": false, "is_required": true, "should_materialize": false }, "solid_handle": { "__class__": "SolidHandle", "name": "noop_solid", "parent": null } } ], "solid_handle_id": "noop_solid", "step_handle": { "__class__": "StepHandle", "key": "noop_solid", "solid_handle": { "__class__": "SolidHandle", "name": "noop_solid", "parent": null } }, "tags": { "bar": "baaz", "foo": "bar" } } ] }''' snapshots['test_create_with_composite 1'] = '''{ "__class__": "ExecutionPlanSnapshot", "artifacts_persisted": true, "executor_name": "in_process", "initial_known_state": null, "pipeline_snapshot_id": "7bb46b4373672e250386288663f7eca81f0a0a02", "snapshot_version": 1, "step_keys_to_execute": [ "comp_1.return_one", "comp_1.add_one", "comp_2.return_one", "comp_2.add_one", "add" ], "steps": [ { "__class__": "ExecutionStepSnap", "inputs": [ { "__class__": "ExecutionStepInputSnap", "dagster_type_key": "Any", "name": "num_one", "source": { "__class__": "FromStepOutput", "fan_in": false, "input_name": "num_one", "solid_handle": { "__class__": "SolidHandle", "name": "add", "parent": null }, "step_output_handle": { "__class__": "StepOutputHandle", "mapping_key": null, "output_name": "result", "step_key": "comp_1.add_one" } }, "upstream_output_handles": [ { "__class__": "StepOutputHandle", "mapping_key": null, "output_name": "result", "step_key": "comp_1.add_one" } ] }, { "__class__": "ExecutionStepInputSnap", "dagster_type_key": "Any", "name": "num_two", "source": { "__class__": "FromStepOutput", "fan_in": false, "input_name": "num_two", "solid_handle": { "__class__": "SolidHandle", "name": "add", "parent": null }, "step_output_handle": { "__class__": "StepOutputHandle", "mapping_key": null, "output_name": "result", "step_key": "comp_2.add_one" } }, "upstream_output_handles": [ { "__class__": 
"StepOutputHandle", "mapping_key": null, "output_name": "result", "step_key": "comp_2.add_one" } ] } ], "key": "add", "kind": { "__enum__": "StepKind.COMPUTE" }, "metadata_items": [], "outputs": [ { "__class__": "ExecutionStepOutputSnap", "dagster_type_key": "Any", "name": "result", "properties": { "__class__": "StepOutputProperties", "asset_key": null, "is_asset": false, "is_dynamic": false, "is_required": true, "should_materialize": false }, "solid_handle": { "__class__": "SolidHandle", "name": "add", "parent": null } } ], "solid_handle_id": "add", "step_handle": { "__class__": "StepHandle", "key": "add", "solid_handle": { "__class__": "SolidHandle", "name": "add", "parent": null } }, "tags": {} }, { "__class__": "ExecutionStepSnap", "inputs": [ { "__class__": "ExecutionStepInputSnap", "dagster_type_key": "Int", "name": "num", "source": { "__class__": "FromStepOutput", "fan_in": false, "input_name": "num", "solid_handle": { "__class__": "SolidHandle", "name": "add_one", "parent": { "__class__": "SolidHandle", "name": "comp_1", "parent": null } }, "step_output_handle": { "__class__": "StepOutputHandle", "mapping_key": null, "output_name": "out_num", "step_key": "comp_1.return_one" } }, "upstream_output_handles": [ { "__class__": "StepOutputHandle", "mapping_key": null, "output_name": "out_num", "step_key": "comp_1.return_one" } ] } ], "key": "comp_1.add_one", "kind": { "__enum__": "StepKind.COMPUTE" }, "metadata_items": [], "outputs": [ { "__class__": "ExecutionStepOutputSnap", "dagster_type_key": "Int", "name": "result", "properties": { "__class__": "StepOutputProperties", "asset_key": null, "is_asset": false, "is_dynamic": false, "is_required": true, "should_materialize": false }, "solid_handle": { "__class__": "SolidHandle", "name": "add_one", "parent": { "__class__": "SolidHandle", "name": "comp_1", "parent": null } } } ], "solid_handle_id": "comp_1.add_one", "step_handle": { "__class__": "StepHandle", "key": "comp_1.add_one", "solid_handle": { "__class__": 
"SolidHandle", "name": "add_one", "parent": { "__class__": "SolidHandle", "name": "comp_1", "parent": null } } }, "tags": {} }, { "__class__": "ExecutionStepSnap", "inputs": [], "key": "comp_1.return_one", "kind": { "__enum__": "StepKind.COMPUTE" }, "metadata_items": [], "outputs": [ { "__class__": "ExecutionStepOutputSnap", "dagster_type_key": "Int", "name": "out_num", "properties": { "__class__": "StepOutputProperties", "asset_key": null, "is_asset": false, "is_dynamic": false, "is_required": true, "should_materialize": false }, "solid_handle": { "__class__": "SolidHandle", "name": "return_one", "parent": { "__class__": "SolidHandle", "name": "comp_1", "parent": null } } } ], "solid_handle_id": "comp_1.return_one", "step_handle": { "__class__": "StepHandle", "key": "comp_1.return_one", "solid_handle": { "__class__": "SolidHandle", "name": "return_one", "parent": { "__class__": "SolidHandle", "name": "comp_1", "parent": null } } }, "tags": {} }, { "__class__": "ExecutionStepSnap", "inputs": [ { "__class__": "ExecutionStepInputSnap", "dagster_type_key": "Int", "name": "num", "source": { "__class__": "FromStepOutput", "fan_in": false, "input_name": "num", "solid_handle": { "__class__": "SolidHandle", "name": "add_one", "parent": { "__class__": "SolidHandle", "name": "comp_2", "parent": null } }, "step_output_handle": { "__class__": "StepOutputHandle", "mapping_key": null, "output_name": "out_num", "step_key": "comp_2.return_one" } }, "upstream_output_handles": [ { "__class__": "StepOutputHandle", "mapping_key": null, "output_name": "out_num", "step_key": "comp_2.return_one" } ] } ], "key": "comp_2.add_one", "kind": { "__enum__": "StepKind.COMPUTE" }, "metadata_items": [], "outputs": [ { "__class__": "ExecutionStepOutputSnap", "dagster_type_key": "Int", "name": "result", "properties": { "__class__": "StepOutputProperties", "asset_key": null, "is_asset": false, "is_dynamic": false, "is_required": true, "should_materialize": false }, "solid_handle": { "__class__": 
"SolidHandle", "name": "add_one", "parent": { "__class__": "SolidHandle", "name": "comp_2", "parent": null } } } ], "solid_handle_id": "comp_2.add_one", "step_handle": { "__class__": "StepHandle", "key": "comp_2.add_one", "solid_handle": { "__class__": "SolidHandle", "name": "add_one", "parent": { "__class__": "SolidHandle", "name": "comp_2", "parent": null } } }, "tags": {} }, { "__class__": "ExecutionStepSnap", "inputs": [], "key": "comp_2.return_one", "kind": { "__enum__": "StepKind.COMPUTE" }, "metadata_items": [], "outputs": [ { "__class__": "ExecutionStepOutputSnap", "dagster_type_key": "Int", "name": "out_num", "properties": { "__class__": "StepOutputProperties", "asset_key": null, "is_asset": false, "is_dynamic": false, "is_required": true, "should_materialize": false }, "solid_handle": { "__class__": "SolidHandle", "name": "return_one", "parent": { "__class__": "SolidHandle", "name": "comp_2", "parent": null } } } ], "solid_handle_id": "comp_2.return_one", "step_handle": { "__class__": "StepHandle", "key": "comp_2.return_one", "solid_handle": { "__class__": "SolidHandle", "name": "return_one", "parent": { "__class__": "SolidHandle", "name": "comp_2", "parent": null } } }, "tags": {} } ] }'''
0.649912
0.346873
import os
from abc import abstractmethod

from pants.backend.jvm.tasks.classpath_entry import ClasspathEntry
from pants.base.build_environment import get_buildroot
from pants.engine.fs import Digest, PathGlobs, PathGlobsAndRoot
from pants.task.task import Task
from pants.util.dirutil import fast_relpath


class ResourcesTask(Task):
    """A base class for tasks that process or create resource files.

    This base assumes that resources targets or targets that generate resources are
    independent from each other and can be processed in isolation in any order.

    :API: public
    """

    @classmethod
    def product_types(cls):
        # This task contributes (or augments) the 'runtime_classpath' product.
        return ['runtime_classpath']

    @classmethod
    def register_options(cls, register):
        super().register_options(register)
        register('--confs', advanced=True, type=list, default=['default'],
                 help='Prepare resources for these Ivy confs.')

    @classmethod
    def prepare(cls, options, round_manager):
        # Declare the upstream product dependency so the round manager schedules
        # compile_classpath production before this task runs.
        round_manager.require_data('compile_classpath')

    @property
    def cache_target_dirs(self):
        # Opt in to per-target results dirs (vt.results_dir) managed by the invalidation
        # framework, so prepared resources are cached per target.
        return True

    def execute(self):
        """Prepare resources for invalid targets and register their chroots on the
        runtime_classpath product.

        :returns: The list of targets whose resources were (re)generated this run.
        """
        # Tracked and returned for use in tests.
        # TODO: Rewrite those tests. execute() is not supposed to return anything.
        processed_targets = []

        compile_classpath = self.context.products.get_data('compile_classpath')
        # runtime_classpath starts as a copy of compile_classpath if not already present.
        runtime_classpath = self.context.products.get_data('runtime_classpath', compile_classpath.copy)

        all_relevant_resources_targets = self.find_all_relevant_resources_targets()
        if not all_relevant_resources_targets:
            return processed_targets

        with self.invalidated(targets=all_relevant_resources_targets,
                              fingerprint_strategy=self.create_invalidation_strategy(),
                              invalidate_dependents=False,
                              topological_order=False) as invalidation:
            for vt in invalidation.invalid_vts:
                # Generate resources to the chroot.
                self.prepare_resources(vt.target, vt.results_dir)
                processed_targets.append(vt.target)
            # All targets (valid and invalid) get their chroots snapshotted and
            # registered, not just the ones regenerated above.
            for vt, digest in self._capture_resources(invalidation.all_vts):
                # Register the target's chroot in the products.
                for conf in self.get_options().confs:
                    runtime_classpath.add_for_target(vt.target, [(conf, ClasspathEntry(vt.results_dir, digest))])
        return processed_targets

    def _capture_resources(self, vts):
        """Given a list of VersionedTargets, capture DirectoryDigests for all of them.

        :returns: A list of tuples of VersionedTargets and digests for their content.
        """
        # Capture Snapshots for each directory, using an optional adjacent digest. Create the digest
        # afterward if it does not exist.
        buildroot = get_buildroot()
        snapshots = self.context._scheduler.capture_snapshots(
            tuple(
                PathGlobsAndRoot(
                    # '**' under the target's results dir, relative to the buildroot.
                    PathGlobs([os.path.join(fast_relpath(vt.results_dir, buildroot), '**')]),
                    buildroot,
                    # Digest.load returns the previously dumped digest if one is
                    # adjacent to the results dir (used as a capture shortcut).
                    Digest.load(vt.current_results_dir),
                ) for vt in vts
            ))
        result = []
        for vt, snapshot in zip(vts, snapshots):
            # Persist the digest next to the results dir for future capture shortcuts.
            snapshot.directory_digest.dump(vt.current_results_dir)
            result.append((vt, snapshot.directory_digest))
        return result

    @abstractmethod
    def find_all_relevant_resources_targets(self):
        """Returns an iterable over all the relevant resources targets in the context."""

    def create_invalidation_strategy(self):
        """Creates a custom fingerprint strategy for determining invalid resources targets.

        :returns: A custom fingerprint strategy to use for determining invalid targets,
                  or `None` to use the standard target payload.
        :rtype: :class:`pants.base.fingerprint_strategy.FingerprintStrategy`
        """
        return None

    @abstractmethod
    def prepare_resources(self, target, chroot):
        """Prepares the resources associated with `target` in the given `chroot`.

        :param target: The target to prepare resource files for.
        :type target: :class:`pants.build_graph.target.Target`
        :param string chroot: An existing, clean chroot dir to generate `target`'s resources to.
        """
src/python/pants/backend/jvm/tasks/resources_task.py
import os
from abc import abstractmethod

from pants.backend.jvm.tasks.classpath_entry import ClasspathEntry
from pants.base.build_environment import get_buildroot
from pants.engine.fs import Digest, PathGlobs, PathGlobsAndRoot
from pants.task.task import Task
from pants.util.dirutil import fast_relpath


class ResourcesTask(Task):
    """A base class for tasks that process or create resource files.

    This base assumes that resources targets or targets that generate resources are
    independent from each other and can be processed in isolation in any order.

    :API: public
    """

    @classmethod
    def product_types(cls):
        # This task contributes (or augments) the 'runtime_classpath' product.
        return ['runtime_classpath']

    @classmethod
    def register_options(cls, register):
        super().register_options(register)
        register('--confs', advanced=True, type=list, default=['default'],
                 help='Prepare resources for these Ivy confs.')

    @classmethod
    def prepare(cls, options, round_manager):
        # Declare the upstream product dependency so the round manager schedules
        # compile_classpath production before this task runs.
        round_manager.require_data('compile_classpath')

    @property
    def cache_target_dirs(self):
        # Opt in to per-target results dirs (vt.results_dir) managed by the invalidation
        # framework, so prepared resources are cached per target.
        return True

    def execute(self):
        """Prepare resources for invalid targets and register their chroots on the
        runtime_classpath product.

        :returns: The list of targets whose resources were (re)generated this run.
        """
        # Tracked and returned for use in tests.
        # TODO: Rewrite those tests. execute() is not supposed to return anything.
        processed_targets = []

        compile_classpath = self.context.products.get_data('compile_classpath')
        # runtime_classpath starts as a copy of compile_classpath if not already present.
        runtime_classpath = self.context.products.get_data('runtime_classpath', compile_classpath.copy)

        all_relevant_resources_targets = self.find_all_relevant_resources_targets()
        if not all_relevant_resources_targets:
            return processed_targets

        with self.invalidated(targets=all_relevant_resources_targets,
                              fingerprint_strategy=self.create_invalidation_strategy(),
                              invalidate_dependents=False,
                              topological_order=False) as invalidation:
            for vt in invalidation.invalid_vts:
                # Generate resources to the chroot.
                self.prepare_resources(vt.target, vt.results_dir)
                processed_targets.append(vt.target)
            # All targets (valid and invalid) get their chroots snapshotted and
            # registered, not just the ones regenerated above.
            for vt, digest in self._capture_resources(invalidation.all_vts):
                # Register the target's chroot in the products.
                for conf in self.get_options().confs:
                    runtime_classpath.add_for_target(vt.target, [(conf, ClasspathEntry(vt.results_dir, digest))])
        return processed_targets

    def _capture_resources(self, vts):
        """Given a list of VersionedTargets, capture DirectoryDigests for all of them.

        :returns: A list of tuples of VersionedTargets and digests for their content.
        """
        # Capture Snapshots for each directory, using an optional adjacent digest. Create the digest
        # afterward if it does not exist.
        buildroot = get_buildroot()
        snapshots = self.context._scheduler.capture_snapshots(
            tuple(
                PathGlobsAndRoot(
                    # '**' under the target's results dir, relative to the buildroot.
                    PathGlobs([os.path.join(fast_relpath(vt.results_dir, buildroot), '**')]),
                    buildroot,
                    # Digest.load returns the previously dumped digest if one is
                    # adjacent to the results dir (used as a capture shortcut).
                    Digest.load(vt.current_results_dir),
                ) for vt in vts
            ))
        result = []
        for vt, snapshot in zip(vts, snapshots):
            # Persist the digest next to the results dir for future capture shortcuts.
            snapshot.directory_digest.dump(vt.current_results_dir)
            result.append((vt, snapshot.directory_digest))
        return result

    @abstractmethod
    def find_all_relevant_resources_targets(self):
        """Returns an iterable over all the relevant resources targets in the context."""

    def create_invalidation_strategy(self):
        """Creates a custom fingerprint strategy for determining invalid resources targets.

        :returns: A custom fingerprint strategy to use for determining invalid targets,
                  or `None` to use the standard target payload.
        :rtype: :class:`pants.base.fingerprint_strategy.FingerprintStrategy`
        """
        return None

    @abstractmethod
    def prepare_resources(self, target, chroot):
        """Prepares the resources associated with `target` in the given `chroot`.

        :param target: The target to prepare resource files for.
        :type target: :class:`pants.build_graph.target.Target`
        :param string chroot: An existing, clean chroot dir to generate `target`'s resources to.
        """
0.566258
0.194062
from __future__ import absolute_import
from __future__ import division

__all__ = ['plr_osnet']

import torch
from torch import nn
from torch.nn import functional as F
import torchvision
from .osnet_ain import *
import copy
import random
import math

from .attention_module import Attention_Module
from .gen_mean_pool import GeM


class PLR_OSNet(nn.Module):
    """Two-branch re-identification head on an OSNet-AIN x1.0 backbone.

    The backbone is shared up to conv3 (with attention modules after conv2/conv3),
    then forks into two branches with independent conv4/conv5 stages:

    - branch 1: GeM-pooled over 4 horizontal stripes, concatenated -> 2048-dim
      (implies conv5 outputs 512 channels; bn1/classifier1 are sized accordingly)
    - branch 2: global max pool -> fc -> 512-dim

    At train time each branch has its own BN + linear classifier; at eval time the
    two BN'd embeddings are L2-normalized and concatenated.
    """

    def __init__(self, num_classes, fc_dims=None, loss=None, pretrained=True, **kwargs):
        super(PLR_OSNet, self).__init__()
        osnet = osnet_ain_x1_0(pretrained=pretrained)
        # Loss mode selects the forward() return signature ('softmax' or 'triplet').
        self.loss = loss
        # Shared stem and early stages of the backbone.
        self.layer0 = nn.Sequential(
            osnet.conv1,
            osnet.maxpool
        )
        self.layer1 = osnet.conv2
        self.attention_module1 = Attention_Module(256)
        self.layer2 = osnet.conv3
        self.attention_module2 = Attention_Module(384)
        # Branch fork: layer31/layer41 are deep copies so each branch has its own
        # conv4/conv5 weights (initialized identically, trained independently).
        self.layer30 = osnet.conv4
        self.layer31 = nn.Sequential(copy.deepcopy(self.layer30))
        self.layer40 = osnet.conv5
        self.layer41 = nn.Sequential(copy.deepcopy(self.layer40))
        # self.global_avgpool = nn.AdaptiveAvgPool2d((1, 1))
        # Generalized-mean pooling replaces plain average pooling for the part branch.
        self.global_avgpool = GeM()
        self.global_maxpool = nn.AdaptiveMaxPool2d((1, 1))
        # fc_dims is the channel width of branch 2's pooled feature; callers pass 512
        # via the plr_osnet() factory below.
        self.fc2 = nn.Linear(fc_dims, 512)
        self.bn1 = nn.BatchNorm1d(2048)
        self.bn2 = nn.BatchNorm1d(512)
        self.classifier1 = nn.Linear(2048, num_classes)
        self.classifier2 = nn.Linear(512, num_classes)
        # Explicit initialization of the head layers (backbone keeps its own init /
        # pretrained weights). NOTE: these nn.init calls consume global RNG state,
        # so their order matters for reproducibility.
        nn.init.constant_(self.bn1.weight, 1.0)
        nn.init.constant_(self.bn1.bias, 0.0)
        nn.init.constant_(self.bn2.weight, 1.0)
        nn.init.constant_(self.bn2.bias, 0.0)
        nn.init.normal_(self.fc2.weight, 0, 0.01)
        if self.fc2.bias is not None:
            nn.init.constant_(self.fc2.bias, 0)
        nn.init.normal_(self.classifier1.weight, 0, 0.01)
        if self.classifier1.bias is not None:
            nn.init.constant_(self.classifier1.bias, 0)
        nn.init.normal_(self.classifier2.weight, 0, 0.01)
        if self.classifier2.bias is not None:
            nn.init.constant_(self.classifier2.bias, 0)

    def featuremaps(self, x):
        """Run the shared trunk then both branch tails; returns (f1, f2) feature maps."""
        x = self.layer0(x)
        x = self.layer1(x)
        x = self.attention_module1(x)
        x = self.layer2(x)
        x = self.attention_module2(x)
        x1 = self.layer30(x)
        x2 = self.layer31(x)
        x1 = self.layer40(x1)
        x2 = self.layer41(x2)
        return x1, x2

    def forward(self, x):
        f1, f2 = self.featuremaps(x)
        B, C, H, W = f1.size()
        # Split branch 1's map into 4 horizontal stripes along the height axis.
        # NOTE(review): stripes use integer division, so when H % 4 != 0 the last
        # stripe absorbs the remainder rows — confirm input height is a multiple of 4
        # if equal-sized parts are expected.
        f11 = f1[:, :, :H // 4, :]
        f12 = f1[:, :, H // 4:H // 2, :]
        f13 = f1[:, :, H // 2:(3 * H // 4), :]
        f14 = f1[:, :, (3 * H // 4):, :]
        v11 = self.global_avgpool(f11)
        v12 = self.global_avgpool(f12)
        v13 = self.global_avgpool(f13)
        v14 = self.global_avgpool(f14)
        v2 = self.global_maxpool(f2)
        v11 = v11.view(v11.size(0), -1)
        v12 = v12.view(v12.size(0), -1)
        v13 = v13.view(v13.size(0), -1)
        v14 = v14.view(v14.size(0), -1)
        v1 = torch.cat([v11, v12, v13, v14], 1)
        v2 = v2.view(v2.size(0), -1)
        v2 = self.fc2(v2)
        # Pre-BN features are the ones fed to the metric (triplet) loss.
        fea = [v1, v2]
        v1 = self.bn1(v1)
        v2 = self.bn2(v2)
        if not self.training:
            # Inference: return one concatenated, L2-normalized embedding.
            v1 = F.normalize(v1, p=2, dim=1)
            v2 = F.normalize(v2, p=2, dim=1)
            return torch.cat([v1, v2], 1)
        y1 = self.classifier1(v1)
        y2 = self.classifier2(v2)
        if self.loss == 'softmax':
            return y1, y2
        elif self.loss == 'triplet':
            return y1, y2, fea
        else:
            raise KeyError("Unsupported loss: {}".format(self.loss))


def plr_osnet(num_classes, loss='softmax', pretrained=True, **kwargs):
    """Factory for :class:`PLR_OSNet` with the standard 512-dim second branch.

    :param num_classes: number of identity classes for the classifier heads.
    :param loss: 'softmax' or 'triplet'; controls forward() outputs at train time.
    :param pretrained: load pretrained OSNet-AIN backbone weights.
    """
    model = PLR_OSNet(
        num_classes=num_classes,
        fc_dims=512,
        loss=loss,
        pretrained=pretrained,
        **kwargs
    )
    return model
torchreid/models/plr_osnet_ain.py
from __future__ import absolute_import
from __future__ import division

__all__ = ['plr_osnet']

import torch
from torch import nn
from torch.nn import functional as F
import torchvision
from .osnet_ain import *
import copy
import random
import math

from .attention_module import Attention_Module
from .gen_mean_pool import GeM


class PLR_OSNet(nn.Module):
    """Two-branch re-identification head on an OSNet-AIN x1.0 backbone.

    The backbone is shared up to conv3 (with attention modules after conv2/conv3),
    then forks into two branches with independent conv4/conv5 stages:

    - branch 1: GeM-pooled over 4 horizontal stripes, concatenated -> 2048-dim
      (implies conv5 outputs 512 channels; bn1/classifier1 are sized accordingly)
    - branch 2: global max pool -> fc -> 512-dim

    At train time each branch has its own BN + linear classifier; at eval time the
    two BN'd embeddings are L2-normalized and concatenated.
    """

    def __init__(self, num_classes, fc_dims=None, loss=None, pretrained=True, **kwargs):
        super(PLR_OSNet, self).__init__()
        osnet = osnet_ain_x1_0(pretrained=pretrained)
        # Loss mode selects the forward() return signature ('softmax' or 'triplet').
        self.loss = loss
        # Shared stem and early stages of the backbone.
        self.layer0 = nn.Sequential(
            osnet.conv1,
            osnet.maxpool
        )
        self.layer1 = osnet.conv2
        self.attention_module1 = Attention_Module(256)
        self.layer2 = osnet.conv3
        self.attention_module2 = Attention_Module(384)
        # Branch fork: layer31/layer41 are deep copies so each branch has its own
        # conv4/conv5 weights (initialized identically, trained independently).
        self.layer30 = osnet.conv4
        self.layer31 = nn.Sequential(copy.deepcopy(self.layer30))
        self.layer40 = osnet.conv5
        self.layer41 = nn.Sequential(copy.deepcopy(self.layer40))
        # self.global_avgpool = nn.AdaptiveAvgPool2d((1, 1))
        # Generalized-mean pooling replaces plain average pooling for the part branch.
        self.global_avgpool = GeM()
        self.global_maxpool = nn.AdaptiveMaxPool2d((1, 1))
        # fc_dims is the channel width of branch 2's pooled feature; callers pass 512
        # via the plr_osnet() factory below.
        self.fc2 = nn.Linear(fc_dims, 512)
        self.bn1 = nn.BatchNorm1d(2048)
        self.bn2 = nn.BatchNorm1d(512)
        self.classifier1 = nn.Linear(2048, num_classes)
        self.classifier2 = nn.Linear(512, num_classes)
        # Explicit initialization of the head layers (backbone keeps its own init /
        # pretrained weights). NOTE: these nn.init calls consume global RNG state,
        # so their order matters for reproducibility.
        nn.init.constant_(self.bn1.weight, 1.0)
        nn.init.constant_(self.bn1.bias, 0.0)
        nn.init.constant_(self.bn2.weight, 1.0)
        nn.init.constant_(self.bn2.bias, 0.0)
        nn.init.normal_(self.fc2.weight, 0, 0.01)
        if self.fc2.bias is not None:
            nn.init.constant_(self.fc2.bias, 0)
        nn.init.normal_(self.classifier1.weight, 0, 0.01)
        if self.classifier1.bias is not None:
            nn.init.constant_(self.classifier1.bias, 0)
        nn.init.normal_(self.classifier2.weight, 0, 0.01)
        if self.classifier2.bias is not None:
            nn.init.constant_(self.classifier2.bias, 0)

    def featuremaps(self, x):
        """Run the shared trunk then both branch tails; returns (f1, f2) feature maps."""
        x = self.layer0(x)
        x = self.layer1(x)
        x = self.attention_module1(x)
        x = self.layer2(x)
        x = self.attention_module2(x)
        x1 = self.layer30(x)
        x2 = self.layer31(x)
        x1 = self.layer40(x1)
        x2 = self.layer41(x2)
        return x1, x2

    def forward(self, x):
        f1, f2 = self.featuremaps(x)
        B, C, H, W = f1.size()
        # Split branch 1's map into 4 horizontal stripes along the height axis.
        # NOTE(review): stripes use integer division, so when H % 4 != 0 the last
        # stripe absorbs the remainder rows — confirm input height is a multiple of 4
        # if equal-sized parts are expected.
        f11 = f1[:, :, :H // 4, :]
        f12 = f1[:, :, H // 4:H // 2, :]
        f13 = f1[:, :, H // 2:(3 * H // 4), :]
        f14 = f1[:, :, (3 * H // 4):, :]
        v11 = self.global_avgpool(f11)
        v12 = self.global_avgpool(f12)
        v13 = self.global_avgpool(f13)
        v14 = self.global_avgpool(f14)
        v2 = self.global_maxpool(f2)
        v11 = v11.view(v11.size(0), -1)
        v12 = v12.view(v12.size(0), -1)
        v13 = v13.view(v13.size(0), -1)
        v14 = v14.view(v14.size(0), -1)
        v1 = torch.cat([v11, v12, v13, v14], 1)
        v2 = v2.view(v2.size(0), -1)
        v2 = self.fc2(v2)
        # Pre-BN features are the ones fed to the metric (triplet) loss.
        fea = [v1, v2]
        v1 = self.bn1(v1)
        v2 = self.bn2(v2)
        if not self.training:
            # Inference: return one concatenated, L2-normalized embedding.
            v1 = F.normalize(v1, p=2, dim=1)
            v2 = F.normalize(v2, p=2, dim=1)
            return torch.cat([v1, v2], 1)
        y1 = self.classifier1(v1)
        y2 = self.classifier2(v2)
        if self.loss == 'softmax':
            return y1, y2
        elif self.loss == 'triplet':
            return y1, y2, fea
        else:
            raise KeyError("Unsupported loss: {}".format(self.loss))


def plr_osnet(num_classes, loss='softmax', pretrained=True, **kwargs):
    """Factory for :class:`PLR_OSNet` with the standard 512-dim second branch.

    :param num_classes: number of identity classes for the classifier heads.
    :param loss: 'softmax' or 'triplet'; controls forward() outputs at train time.
    :param pretrained: load pretrained OSNet-AIN backbone weights.
    """
    model = PLR_OSNet(
        num_classes=num_classes,
        fc_dims=512,
        loss=loss,
        pretrained=pretrained,
        **kwargs
    )
    return model
0.916405
0.252384
import argparse
import json

from embers.sat_utils.sat_ephemeris import ephem_batch


def main():
    """Entry point for the ``ephem_batch`` console script.

    Analyse a batch of TLE files with the
    :func:`~embers.sat_utils.sat_ephemeris.ephem_batch` function. Determine
    satellite ephemeris data: rise time, set time, alt/az arrays at a given time
    cadence. This is saved to a npz file which will be used to plot the satellite
    sky coverage over the geographic location supplied.

    .. code-block:: console

        $ ephem_batch --help
    """
    # NOTE(review): this docstring says the output is an npz file while the parser
    # description below says json — confirm against sat_ephemeris.ephem_batch.
    _parser = argparse.ArgumentParser(
        description="""
        Code which converts the TLE files downloaded with download_TLE.py
        into satellite ephemeris data: rise time, set time, alt/az arrays
        at a given time cadence. This is saved to a json file which will
        be used to plot the satellite passes.
        """
    )
    # metavar="\b" (backspace) is used throughout to suppress the metavar in --help output.
    _parser.add_argument(
        "--tle_dir",
        metavar="\b",
        default="./embers_out/sat_utils/TLE",
        help="Path to directory with TLE files. Default=./embers_out/sat_utils/TLE",
    )
    _parser.add_argument(
        "--cadence",
        metavar="\b",
        type=int,
        default=4,
        help="Rate at which sat alt/az is computed. default=4s",
    )
    # Parsed with json.loads so the user can pass e.g. '[-26.7, 116.6, 337.8]';
    # the default is the MWA site as a (lat, lon, elevation) tuple.
    _parser.add_argument(
        "--location",
        metavar="\b",
        type=json.loads,
        default=(-26.703319, 116.670815, 337.83),
        help="Geographic location where satellite ephemeris is to be determined. Default=MWA:(-26.703319, 116.670815, 337.83)",
    )
    # BUG FIX: this option was declared type=int, which contradicted its float
    # default (0.5) and made any fractional command-line value (e.g. --alpha 0.3)
    # fail with "invalid int value". Alpha is a plot transparency, so parse float.
    # (Also fixed the "Defaut" typo in the help text.)
    _parser.add_argument(
        "--alpha",
        metavar="\b",
        type=float,
        default=0.5,
        help="Alpha value for sky coverage plot. Default: 0.5. If too many satellite, reduce value",
    )
    _parser.add_argument(
        "--out_dir",
        metavar="\b",
        default="./embers_out/sat_utils",
        help="Path to output directory. Default=./embers_out/sat_utils",
    )
    # No default: when omitted, ephem_batch receives None and uses all available cores.
    _parser.add_argument(
        "--max_cores",
        metavar="\b",
        type=int,
        help="Maximum number of cores to be used by this script. By default all core available cores are used",
    )

    _args = _parser.parse_args()
    _tle_dir = _args.tle_dir
    _cadence = _args.cadence
    _location = _args.location
    _alpha = _args.alpha
    _out_dir = _args.out_dir
    _max_cores = _args.max_cores

    print(f"Saving logs to {_out_dir}/ephem_data")
    print(f"Saving sky coverage plots to {_out_dir}/ephem_plots")
    print(f"Saving ephemeris of satellites to {_out_dir}/ephem_data")
    ephem_batch(_tle_dir, _cadence, _location, _alpha, _out_dir, max_cores=_max_cores)
src/embers/kindle/ephem_batch.py
import argparse
import json

from embers.sat_utils.sat_ephemeris import ephem_batch


def main():
    """Entry point for the ``ephem_batch`` console script.

    Analyse a batch of TLE files with the
    :func:`~embers.sat_utils.sat_ephemeris.ephem_batch` function. Determine
    satellite ephemeris data: rise time, set time, alt/az arrays at a given time
    cadence. This is saved to a npz file which will be used to plot the satellite
    sky coverage over the geographic location supplied.

    .. code-block:: console

        $ ephem_batch --help
    """
    # NOTE(review): this docstring says the output is an npz file while the parser
    # description below says json — confirm against sat_ephemeris.ephem_batch.
    _parser = argparse.ArgumentParser(
        description="""
        Code which converts the TLE files downloaded with download_TLE.py
        into satellite ephemeris data: rise time, set time, alt/az arrays
        at a given time cadence. This is saved to a json file which will
        be used to plot the satellite passes.
        """
    )
    # metavar="\b" (backspace) is used throughout to suppress the metavar in --help output.
    _parser.add_argument(
        "--tle_dir",
        metavar="\b",
        default="./embers_out/sat_utils/TLE",
        help="Path to directory with TLE files. Default=./embers_out/sat_utils/TLE",
    )
    _parser.add_argument(
        "--cadence",
        metavar="\b",
        type=int,
        default=4,
        help="Rate at which sat alt/az is computed. default=4s",
    )
    # Parsed with json.loads so the user can pass e.g. '[-26.7, 116.6, 337.8]';
    # the default is the MWA site as a (lat, lon, elevation) tuple.
    _parser.add_argument(
        "--location",
        metavar="\b",
        type=json.loads,
        default=(-26.703319, 116.670815, 337.83),
        help="Geographic location where satellite ephemeris is to be determined. Default=MWA:(-26.703319, 116.670815, 337.83)",
    )
    # BUG FIX: this option was declared type=int, which contradicted its float
    # default (0.5) and made any fractional command-line value (e.g. --alpha 0.3)
    # fail with "invalid int value". Alpha is a plot transparency, so parse float.
    # (Also fixed the "Defaut" typo in the help text.)
    _parser.add_argument(
        "--alpha",
        metavar="\b",
        type=float,
        default=0.5,
        help="Alpha value for sky coverage plot. Default: 0.5. If too many satellite, reduce value",
    )
    _parser.add_argument(
        "--out_dir",
        metavar="\b",
        default="./embers_out/sat_utils",
        help="Path to output directory. Default=./embers_out/sat_utils",
    )
    # No default: when omitted, ephem_batch receives None and uses all available cores.
    _parser.add_argument(
        "--max_cores",
        metavar="\b",
        type=int,
        help="Maximum number of cores to be used by this script. By default all core available cores are used",
    )

    _args = _parser.parse_args()
    _tle_dir = _args.tle_dir
    _cadence = _args.cadence
    _location = _args.location
    _alpha = _args.alpha
    _out_dir = _args.out_dir
    _max_cores = _args.max_cores

    print(f"Saving logs to {_out_dir}/ephem_data")
    print(f"Saving sky coverage plots to {_out_dir}/ephem_plots")
    print(f"Saving ephemeris of satellites to {_out_dir}/ephem_data")
    ephem_batch(_tle_dir, _cadence, _location, _alpha, _out_dir, max_cores=_max_cores)
0.795936
0.395484
__all__ = ['getch']


class _Getch:
    """ Gets a single character from standard input.  Does not echo to the screen. """

    def __init__(self, is_blocking=True):
        # Platform dispatch: importing msvcrt raises ImportError off Windows,
        # in which case we fall back to the termios/tty-based Unix implementation.
        try:
            self.impl = _GetchWindows(is_blocking)
        except ImportError:
            self.impl = _GetchUnix(is_blocking)

    def __getattr__(self, attr):
        # Delegate blocking/non_blocking/restore_settings to the platform impl.
        return getattr(self.impl, attr)

    def __call__(self):
        return self.impl()


class _GetchUnix:
    # Unix implementation built on termios/tty; reads one char from stdin.

    def __init__(self, is_blocking):
        import tty, sys, termios
        # Capture the terminal settings at construction so restore_settings()
        # can put the tty back even after a crash mid-read.
        # NOTE(review): tcgetattr raises if stdin is not a tty (e.g. piped input).
        self.fd = sys.stdin.fileno()
        self.old_settings = termios.tcgetattr(self.fd)
        if is_blocking is True:
            self._getch = self.blocking
        else:
            self._getch = self.non_blocking

    def blocking(self):
        """Wait for one keypress and return it; terminal is put in raw mode for the read."""
        import sys, tty, termios
        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(sys.stdin.fileno())
            ch = sys.stdin.read(1)
        finally:
            # Always restore cooked mode, even if the read is interrupted.
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
        return ch

    def non_blocking(self):
        """Return one pending character, or None if no key has been pressed."""
        import sys, tty, termios
        old_settings = termios.tcgetattr(sys.stdin)
        ch = None
        try:
            # cbreak (not raw) is enough here: char-at-a-time without echo-disable side effects.
            tty.setcbreak(sys.stdin.fileno())
            if self._is_data():
                ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(sys.stdin, termios.TCSADRAIN, old_settings)
        return ch

    def restore_settings(self):
        # Reapply the settings captured in __init__.
        import sys, tty, termios
        termios.tcsetattr(sys.stdin, termios.TCSADRAIN, self.old_settings)

    @staticmethod
    def _is_data():
        # Zero-timeout select: True iff stdin has data ready to read right now.
        import select, sys
        return select.select([sys.stdin], [], [], 0) == ([sys.stdin], [], [])

    def __call__(self):
        return self._getch()


class _GetchWindows:
    # Windows implementation built on msvcrt.
    # NOTE(review): msvcrt.getch returns bytes on Python 3, while the Unix impl
    # returns str — callers comparing against str literals (see the demo below)
    # will not match on Windows; confirm intended behavior.

    def __init__(self, is_blocking):
        import msvcrt
        if is_blocking is True:
            self._getch = self.blocking
        else:
            self._getch = self.non_blocking

    def blocking(self):
        import msvcrt
        return msvcrt.getch()

    def non_blocking(self):
        # Returns None (implicitly) when no key is waiting.
        import msvcrt
        if msvcrt.kbhit():
            return msvcrt.getch()

    def restore_settings(self):
        # No terminal state to restore on Windows; kept for interface parity with _GetchUnix.
        pass

    def __call__(self):
        return self._getch()


# Module-level singleton; constructed in blocking mode at import time.
getch = _Getch()

if __name__ == "__main__":
    import sys

    def getch_loop(is_blocking=True):
        # Demo/echo loop: print each keypress until ESC is pressed.
        print(f'{"Blocking" if is_blocking is True else "Non-blocking"} getch! Press any key! Esc to quit!')
        i = 0
        getch_func = getch.blocking if is_blocking is True else getch.non_blocking
        while True:
            char = getch_func()
            # In non-blocking mode char is usually None; the i % 15000 clause
            # throttles the "nothing pressed" progress output.
            if char or i % 15000 == 0:
                print(f'{i}: {char}')
            if char == '\x1b':  # ESC key
                break
            i += 1

    getch_file, *args = sys.argv
    print("Getch! Echo key press usage:\n"
          f"Blocking mode: python {getch_file}\n"
          f"Non-blocking mode: python {getch_file} False\n")
    getch_loop(is_blocking=False if len(args) and args[0] == 'False' else True)
stream2py/utility/getch.py
__all__ = ['getch']


class _Getch:
    """ Gets a single character from standard input.  Does not echo to the screen. """

    def __init__(self, is_blocking=True):
        # Platform dispatch: importing msvcrt raises ImportError off Windows,
        # in which case we fall back to the termios/tty-based Unix implementation.
        try:
            self.impl = _GetchWindows(is_blocking)
        except ImportError:
            self.impl = _GetchUnix(is_blocking)

    def __getattr__(self, attr):
        # Delegate blocking/non_blocking/restore_settings to the platform impl.
        return getattr(self.impl, attr)

    def __call__(self):
        return self.impl()


class _GetchUnix:
    # Unix implementation built on termios/tty; reads one char from stdin.

    def __init__(self, is_blocking):
        import tty, sys, termios
        # Capture the terminal settings at construction so restore_settings()
        # can put the tty back even after a crash mid-read.
        # NOTE(review): tcgetattr raises if stdin is not a tty (e.g. piped input).
        self.fd = sys.stdin.fileno()
        self.old_settings = termios.tcgetattr(self.fd)
        if is_blocking is True:
            self._getch = self.blocking
        else:
            self._getch = self.non_blocking

    def blocking(self):
        """Wait for one keypress and return it; terminal is put in raw mode for the read."""
        import sys, tty, termios
        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(sys.stdin.fileno())
            ch = sys.stdin.read(1)
        finally:
            # Always restore cooked mode, even if the read is interrupted.
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
        return ch

    def non_blocking(self):
        """Return one pending character, or None if no key has been pressed."""
        import sys, tty, termios
        old_settings = termios.tcgetattr(sys.stdin)
        ch = None
        try:
            # cbreak (not raw) is enough here: char-at-a-time without echo-disable side effects.
            tty.setcbreak(sys.stdin.fileno())
            if self._is_data():
                ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(sys.stdin, termios.TCSADRAIN, old_settings)
        return ch

    def restore_settings(self):
        # Reapply the settings captured in __init__.
        import sys, tty, termios
        termios.tcsetattr(sys.stdin, termios.TCSADRAIN, self.old_settings)

    @staticmethod
    def _is_data():
        # Zero-timeout select: True iff stdin has data ready to read right now.
        import select, sys
        return select.select([sys.stdin], [], [], 0) == ([sys.stdin], [], [])

    def __call__(self):
        return self._getch()


class _GetchWindows:
    # Windows implementation built on msvcrt.
    # NOTE(review): msvcrt.getch returns bytes on Python 3, while the Unix impl
    # returns str — callers comparing against str literals (see the demo below)
    # will not match on Windows; confirm intended behavior.

    def __init__(self, is_blocking):
        import msvcrt
        if is_blocking is True:
            self._getch = self.blocking
        else:
            self._getch = self.non_blocking

    def blocking(self):
        import msvcrt
        return msvcrt.getch()

    def non_blocking(self):
        # Returns None (implicitly) when no key is waiting.
        import msvcrt
        if msvcrt.kbhit():
            return msvcrt.getch()

    def restore_settings(self):
        # No terminal state to restore on Windows; kept for interface parity with _GetchUnix.
        pass

    def __call__(self):
        return self._getch()


# Module-level singleton; constructed in blocking mode at import time.
getch = _Getch()

if __name__ == "__main__":
    import sys

    def getch_loop(is_blocking=True):
        # Demo/echo loop: print each keypress until ESC is pressed.
        print(f'{"Blocking" if is_blocking is True else "Non-blocking"} getch! Press any key! Esc to quit!')
        i = 0
        getch_func = getch.blocking if is_blocking is True else getch.non_blocking
        while True:
            char = getch_func()
            # In non-blocking mode char is usually None; the i % 15000 clause
            # throttles the "nothing pressed" progress output.
            if char or i % 15000 == 0:
                print(f'{i}: {char}')
            if char == '\x1b':  # ESC key
                break
            i += 1

    getch_file, *args = sys.argv
    print("Getch! Echo key press usage:\n"
          f"Blocking mode: python {getch_file}\n"
          f"Non-blocking mode: python {getch_file} False\n")
    getch_loop(is_blocking=False if len(args) and args[0] == 'False' else True)
0.444444
0.119459
import click import shutil import os import re import subprocess from collections import defaultdict from typing import List, Dict, FrozenSet import networkx as nx import matplotlib.pyplot as plt from networkx.drawing.nx_agraph import graphviz_layout from networkx.algorithms.simple_paths import all_simple_paths class Node: pass class FileNode(Node): def __init__(self, filename): self.filename = filename self.id = self.filename self.label = os.path.basename(self.filename) if self.label == '': assert os.path.dirname(self.filename) == os.path.normpath(self.filename) self.label = os.path.split(os.path.normpath(self.filename))[1] self.color = (0., 1., 0.) class InputsNode(Node): def __init__(self, inputs: FrozenSet[FileNode]): self.inputs = inputs self.id = str(hash(self.inputs)) self.label = '' self.color = (0., 0., 0.) class RuleNode(Node): def __init__(self, name): self.name = name self.id = self.name self.label = self.name self.color = (1., 0., 0.) nodes: Dict[str, Node] = dict() edges: Dict[Node, List[Node]] = defaultdict(list) graph = nx.DiGraph() label_dict: Dict[str, str] = dict() def build_graph(rule): # os.chdir('/data/l989o/deployed/spatial_uzh') try: s = subprocess.check_output(f'/data/l989o/miniconda3/envs/spatial_uzh2/bin/snakemake {rule} --forceall --rerun-incomplete -n', shell=True).decode('utf-8') # print(s) except subprocess.CalledProcessError as grepexc: print('error code', grepexc.returncode, grepexc.output) raise subprocess.CalledProcessError(grepexc) r0 = re.compile(r'Building DAG of jobs...\nJob counts:\n\tcount\tjobs\n((?:\t[0-9]+\t[_a-zA-Z0-9]+\n)+)\t[0-9]+\n([\s\S]*?)\nThis was a dry-run \(flag -n\). 
The order of jobs does not reflect the order of execution.') m0 = re.match(r0, s) g0, g1 = m0.groups() lines = g0.split('\n') lines = [s.strip() for s in lines] lines = [s for s in lines if s != ''] r1 = re.compile('[0-9]+\t([_a-zA-Z0-9]+)') for line in lines: m1 = re.match(r1, line) rule_name = m1.groups()[0] # graph.add_node(rule_name) # print(rule_name) # v = RuleNode(rule_name) # rule_nodes[v] = rule_name # rule_nodes.append(v) lines = g1.split('\n\n') assert lines[-1].startswith('Job counts:') del lines[-1] for line in lines: ss = line.split('\n') ss = [s.strip() for s in ss] rule_node = None for s in ss: if s.startswith('rule'): assert rule_node is None rule_name = re.match(r'rule\ ([_a-zA-Z0-9]+):', s).groups()[0] rule_node = RuleNode(rule_name) graph.add_node(rule_node.id) nodes[rule_node.id] = rule_node label_dict[rule_node.id] = rule_node.label # print(rule_name) elif s.startswith('localrule'): assert rule_node is None rule_name = re.match(r'localrule\ ([_a-zA-Z0-9]+):', s).groups()[0] rule_node = RuleNode(rule_name) graph.add_node(rule_node.id) nodes[rule_node.id] = rule_node label_dict[rule_node.id] = rule_node.label elif s.startswith('input: '): inputs = s[len('input: '):].split(', ') inputs = sorted(inputs) # print(inputs) assert rule_node is not None # inputs_node = InputsNode(frozenset(inputs)) # edges[inputs_node].append(rule_node) # graph.add_edge(inputs_node.id, rule_node.id) # nodes[inputs_node.id] = inputs_node # label_dict[inputs_node.id] = inputs_node.label for x in inputs: file_node = FileNode(x) edges[file_node].append(rule_node) graph.add_edge(file_node.id, rule_node.id) # edges[file_node].append(inputs_node) # graph.add_edge(file_node.id, inputs_node.id) nodes[file_node.id] = file_node label_dict[file_node.id] = file_node.label # file_nodes[file_node] = x elif s.startswith('output: '): outputs = s[len('output: '):].split(', ') assert rule_node is not None for x in outputs: file_node = FileNode(x) edges[rule_node].append(file_node) 
graph.add_edge(rule_node.id, file_node.id) nodes[file_node.id] = file_node label_dict[file_node.id] = file_node.label # print(outputs) def find_subgraph(node0, node1): assert nx.algorithms.is_directed_acyclic_graph(graph) if node0 is None and node1 is None: return None elif node0 is not None and node1 is None: nodes_of_paths = nx.algorithms.descendants(graph, node0) nodes_of_paths.add(node0) else: paths = all_simple_paths(graph, node0, node1) nodes_of_paths = [] for path in paths: nodes_of_paths.extend(list(path)) subgraph = graph.subgraph(nodes_of_paths) return subgraph def _plot(subgraph=None): plt.figure(figsize=(20, 11)) # pos = nx.spring_layout(graph) colors = [nodes[node_id].color for node_id in graph.nodes()] pos = graphviz_layout(graph) nx.draw_networkx(graph, pos, with_labels=True, labels=label_dict, font_size=7, arrowstyle='-|>', arrowsize=20, arrows=True, node_color=colors) if subgraph is not None: orange = (1.0, 0.6823529411764706, 0.25882352941176473, 0.8) nx.draw_networkx_edges(subgraph, pos, edge_color=orange, width=3, arrowstyle='-|>', arrowsize=20, arrows=True) # plt.savefig('simple_path.png') plt.show() def _rm_command(subgraph): assert len(list(nx.isolates(subgraph))) == 0 to_rm = [] for node in subgraph.nodes(): obj = nodes[node] if type(obj) == FileNode: to_rm.append(obj.filename) assert all([' ' not in f for f in to_rm]) to_rm = sorted(to_rm, key=lambda x: x[::-1]) print('ready to delete the following files:') print('\n'.join(to_rm)) if click.confirm('do you want to continue?', default=True): for f in to_rm: if os.path.isdir(f): print('skipping directory:', f) else: if os.path.exists(f): assert os.path.isfile(f) os.remove(f) print('removed:', f) else: print('skipping non-existing:', f) # print(f'for f in {" ".join(to_rm)}; do rm $f; done') @click.command() @click.option('--rule', type=str, required=True, help='snakemake rule used to build the dag') @click.option('--node0', type=str, required=False, help='str (rule name or full path)', 
default=None) @click.option('--node1', type=str, required=False, help='str (rule name or full path)', default=None) def plot(rule, node0, node1): assert not (node1 is not None and node0 is None) build_graph(rule) subgraph = find_subgraph(node0, node1) _plot(subgraph) @click.command() @click.option('--rule', type=str, required=True, help='snakemake rule used to build the dag') @click.option('--node0', type=str, required=False, help='str (rule name or full path)', default=None) @click.option('--node1', type=str, required=False, help='str (rule name or full path)', default=None) @click.option('--plot', type=bool, required=False, help='plot the graph', default=False) def rm_command(rule, node0, node1, plot): build_graph(rule) subgraph = find_subgraph(node0, node1) if plot: _plot(subgraph) _rm_command(subgraph) @click.group() def cli(): pass cli.add_command(plot) cli.add_command(rm_command) if __name__ == '__main__': cli()
snakemake_workaround.py
import click import shutil import os import re import subprocess from collections import defaultdict from typing import List, Dict, FrozenSet import networkx as nx import matplotlib.pyplot as plt from networkx.drawing.nx_agraph import graphviz_layout from networkx.algorithms.simple_paths import all_simple_paths class Node: pass class FileNode(Node): def __init__(self, filename): self.filename = filename self.id = self.filename self.label = os.path.basename(self.filename) if self.label == '': assert os.path.dirname(self.filename) == os.path.normpath(self.filename) self.label = os.path.split(os.path.normpath(self.filename))[1] self.color = (0., 1., 0.) class InputsNode(Node): def __init__(self, inputs: FrozenSet[FileNode]): self.inputs = inputs self.id = str(hash(self.inputs)) self.label = '' self.color = (0., 0., 0.) class RuleNode(Node): def __init__(self, name): self.name = name self.id = self.name self.label = self.name self.color = (1., 0., 0.) nodes: Dict[str, Node] = dict() edges: Dict[Node, List[Node]] = defaultdict(list) graph = nx.DiGraph() label_dict: Dict[str, str] = dict() def build_graph(rule): # os.chdir('/data/l989o/deployed/spatial_uzh') try: s = subprocess.check_output(f'/data/l989o/miniconda3/envs/spatial_uzh2/bin/snakemake {rule} --forceall --rerun-incomplete -n', shell=True).decode('utf-8') # print(s) except subprocess.CalledProcessError as grepexc: print('error code', grepexc.returncode, grepexc.output) raise subprocess.CalledProcessError(grepexc) r0 = re.compile(r'Building DAG of jobs...\nJob counts:\n\tcount\tjobs\n((?:\t[0-9]+\t[_a-zA-Z0-9]+\n)+)\t[0-9]+\n([\s\S]*?)\nThis was a dry-run \(flag -n\). 
The order of jobs does not reflect the order of execution.') m0 = re.match(r0, s) g0, g1 = m0.groups() lines = g0.split('\n') lines = [s.strip() for s in lines] lines = [s for s in lines if s != ''] r1 = re.compile('[0-9]+\t([_a-zA-Z0-9]+)') for line in lines: m1 = re.match(r1, line) rule_name = m1.groups()[0] # graph.add_node(rule_name) # print(rule_name) # v = RuleNode(rule_name) # rule_nodes[v] = rule_name # rule_nodes.append(v) lines = g1.split('\n\n') assert lines[-1].startswith('Job counts:') del lines[-1] for line in lines: ss = line.split('\n') ss = [s.strip() for s in ss] rule_node = None for s in ss: if s.startswith('rule'): assert rule_node is None rule_name = re.match(r'rule\ ([_a-zA-Z0-9]+):', s).groups()[0] rule_node = RuleNode(rule_name) graph.add_node(rule_node.id) nodes[rule_node.id] = rule_node label_dict[rule_node.id] = rule_node.label # print(rule_name) elif s.startswith('localrule'): assert rule_node is None rule_name = re.match(r'localrule\ ([_a-zA-Z0-9]+):', s).groups()[0] rule_node = RuleNode(rule_name) graph.add_node(rule_node.id) nodes[rule_node.id] = rule_node label_dict[rule_node.id] = rule_node.label elif s.startswith('input: '): inputs = s[len('input: '):].split(', ') inputs = sorted(inputs) # print(inputs) assert rule_node is not None # inputs_node = InputsNode(frozenset(inputs)) # edges[inputs_node].append(rule_node) # graph.add_edge(inputs_node.id, rule_node.id) # nodes[inputs_node.id] = inputs_node # label_dict[inputs_node.id] = inputs_node.label for x in inputs: file_node = FileNode(x) edges[file_node].append(rule_node) graph.add_edge(file_node.id, rule_node.id) # edges[file_node].append(inputs_node) # graph.add_edge(file_node.id, inputs_node.id) nodes[file_node.id] = file_node label_dict[file_node.id] = file_node.label # file_nodes[file_node] = x elif s.startswith('output: '): outputs = s[len('output: '):].split(', ') assert rule_node is not None for x in outputs: file_node = FileNode(x) edges[rule_node].append(file_node) 
graph.add_edge(rule_node.id, file_node.id) nodes[file_node.id] = file_node label_dict[file_node.id] = file_node.label # print(outputs) def find_subgraph(node0, node1): assert nx.algorithms.is_directed_acyclic_graph(graph) if node0 is None and node1 is None: return None elif node0 is not None and node1 is None: nodes_of_paths = nx.algorithms.descendants(graph, node0) nodes_of_paths.add(node0) else: paths = all_simple_paths(graph, node0, node1) nodes_of_paths = [] for path in paths: nodes_of_paths.extend(list(path)) subgraph = graph.subgraph(nodes_of_paths) return subgraph def _plot(subgraph=None): plt.figure(figsize=(20, 11)) # pos = nx.spring_layout(graph) colors = [nodes[node_id].color for node_id in graph.nodes()] pos = graphviz_layout(graph) nx.draw_networkx(graph, pos, with_labels=True, labels=label_dict, font_size=7, arrowstyle='-|>', arrowsize=20, arrows=True, node_color=colors) if subgraph is not None: orange = (1.0, 0.6823529411764706, 0.25882352941176473, 0.8) nx.draw_networkx_edges(subgraph, pos, edge_color=orange, width=3, arrowstyle='-|>', arrowsize=20, arrows=True) # plt.savefig('simple_path.png') plt.show() def _rm_command(subgraph): assert len(list(nx.isolates(subgraph))) == 0 to_rm = [] for node in subgraph.nodes(): obj = nodes[node] if type(obj) == FileNode: to_rm.append(obj.filename) assert all([' ' not in f for f in to_rm]) to_rm = sorted(to_rm, key=lambda x: x[::-1]) print('ready to delete the following files:') print('\n'.join(to_rm)) if click.confirm('do you want to continue?', default=True): for f in to_rm: if os.path.isdir(f): print('skipping directory:', f) else: if os.path.exists(f): assert os.path.isfile(f) os.remove(f) print('removed:', f) else: print('skipping non-existing:', f) # print(f'for f in {" ".join(to_rm)}; do rm $f; done') @click.command() @click.option('--rule', type=str, required=True, help='snakemake rule used to build the dag') @click.option('--node0', type=str, required=False, help='str (rule name or full path)', 
default=None) @click.option('--node1', type=str, required=False, help='str (rule name or full path)', default=None) def plot(rule, node0, node1): assert not (node1 is not None and node0 is None) build_graph(rule) subgraph = find_subgraph(node0, node1) _plot(subgraph) @click.command() @click.option('--rule', type=str, required=True, help='snakemake rule used to build the dag') @click.option('--node0', type=str, required=False, help='str (rule name or full path)', default=None) @click.option('--node1', type=str, required=False, help='str (rule name or full path)', default=None) @click.option('--plot', type=bool, required=False, help='plot the graph', default=False) def rm_command(rule, node0, node1, plot): build_graph(rule) subgraph = find_subgraph(node0, node1) if plot: _plot(subgraph) _rm_command(subgraph) @click.group() def cli(): pass cli.add_command(plot) cli.add_command(rm_command) if __name__ == '__main__': cli()
0.367611
0.312882
import json from typing import Any, Dict, List, Optional from dateutil.relativedelta import relativedelta from django.contrib.postgres.fields import JSONField from django.core.exceptions import EmptyResultSet from django.db import connection, models, transaction from django.db.models import Q from django.db.models.expressions import F from django.utils import timezone from sentry_sdk import capture_exception from posthog.ee import is_ee_enabled from .action import Action from .event import Event from .filters import Filter from .person import Person DELETE_QUERY = """ DELETE FROM "posthog_cohortpeople" WHERE "cohort_id" = {cohort_id}; """ UPDATE_QUERY = """ INSERT INTO "posthog_cohortpeople" ("person_id", "cohort_id") {values_query} ON CONFLICT DO NOTHING """ class Group(object): def __init__( self, properties: Optional[Dict[str, Any]] = None, action_id: Optional[int] = None, days: Optional[int] = None, ): if not properties and not action_id: raise ValueError("Cohort group needs properties or action_id") self.properties = properties self.action_id = action_id self.days = days class CohortManager(models.Manager): def create(self, *args: Any, **kwargs: Any): if kwargs.get("groups"): kwargs["groups"] = [Group(**group).__dict__ for group in kwargs["groups"]] cohort = super().create(*args, **kwargs) return cohort class Cohort(models.Model): name: models.CharField = models.CharField(max_length=400, null=True, blank=True) team: models.ForeignKey = models.ForeignKey("Team", on_delete=models.CASCADE) deleted: models.BooleanField = models.BooleanField(default=False) groups: JSONField = JSONField(default=list) people: models.ManyToManyField = models.ManyToManyField("Person", through="CohortPeople") created_by: models.ForeignKey = models.ForeignKey("User", on_delete=models.SET_NULL, blank=True, null=True) created_at: models.DateTimeField = models.DateTimeField(default=timezone.now, blank=True, null=True) is_calculating: models.BooleanField = models.BooleanField(default=False) 
last_calculation: models.DateTimeField = models.DateTimeField(blank=True, null=True) errors_calculating: models.IntegerField = models.IntegerField(default=0) is_static: models.BooleanField = models.BooleanField(default=False) objects = CohortManager() def get_analytics_metadata(self): action_groups_count: int = 0 properties_groups_count: int = 0 for group in self.groups: action_groups_count += 1 if group.get("action_id") else 0 properties_groups_count += 1 if group.get("properties") else 0 return { "name_length": len(self.name) if self.name else 0, "person_count_precalc": self.people.count(), "groups_count": len(self.groups), "action_groups_count": action_groups_count, "properties_groups_count": properties_groups_count, "deleted": self.deleted, } def calculate_people(self, use_clickhouse=is_ee_enabled()): if self.is_static: return try: if not use_clickhouse: self.is_calculating = True self.save() persons_query = self._clickhouse_persons_query() if use_clickhouse else self._postgres_persons_query() try: sql, params = persons_query.distinct("pk").only("pk").query.sql_with_params() except EmptyResultSet: query = DELETE_QUERY.format(cohort_id=self.pk) params = {} else: query = "{}{}".format(DELETE_QUERY, UPDATE_QUERY).format( cohort_id=self.pk, values_query=sql.replace('FROM "posthog_person"', ', {} FROM "posthog_person"'.format(self.pk), 1,), ) cursor = connection.cursor() with transaction.atomic(): cursor.execute(query, params) self.is_calculating = False self.last_calculation = timezone.now() self.errors_calculating = 0 self.save() except Exception: self.is_calculating = False self.errors_calculating = F("errors_calculating") + 1 self.save() capture_exception() def insert_users_by_list(self, items: List[str]) -> None: """ Items can be distinct_id or email """ batchsize = 1000 use_clickhouse = is_ee_enabled() if use_clickhouse: from ee.clickhouse.models.cohort import insert_static_cohort try: cursor = connection.cursor() for i in range(0, len(items), batchsize): 
batch = items[i : i + batchsize] persons_query = ( Person.objects.filter(team_id=self.team_id) .filter(Q(persondistinctid__team_id=self.team_id, persondistinctid__distinct_id__in=batch)) .exclude(cohort__id=self.id) ) if use_clickhouse: insert_static_cohort([p for p in persons_query.values_list("uuid", flat=True)], self.pk, self.team) sql, params = persons_query.distinct("pk").only("pk").query.sql_with_params() query = UPDATE_QUERY.format( cohort_id=self.pk, values_query=sql.replace('FROM "posthog_person"', ', {} FROM "posthog_person"'.format(self.pk), 1,), ) cursor.execute(query, params) self.is_calculating = False self.last_calculation = timezone.now() self.errors_calculating = 0 self.save() except Exception: self.is_calculating = False self.errors_calculating = F("errors_calculating") + 1 self.save() capture_exception() def __str__(self): return self.name def _clickhouse_persons_query(self): from ee.clickhouse.models.cohort import get_person_ids_by_cohort_id uuids = get_person_ids_by_cohort_id(team=self.team, cohort_id=self.pk) return Person.objects.filter(uuid__in=uuids, team=self.team) def _postgres_persons_query(self): return Person.objects.filter(self._people_filter(), team=self.team) def _people_filter(self, extra_filter=None): from posthog.queries.base import properties_to_Q filters = Q() for group in self.groups: if group.get("action_id"): action = Action.objects.get(pk=group["action_id"], team_id=self.team_id) events = ( Event.objects.filter_by_action(action) .filter( team_id=self.team_id, **( {"timestamp__gt": timezone.now() - relativedelta(days=int(group["days"]))} if group.get("days") else {} ), **(extra_filter if extra_filter else {}) ) .order_by("distinct_id") .distinct("distinct_id") .values("distinct_id") ) filters |= Q(persondistinctid__distinct_id__in=events) elif group.get("properties"): filter = Filter(data=group) filters |= Q(properties_to_Q(filter.properties, team_id=self.team_id, is_person_query=True)) return filters class 
CohortPeople(models.Model): id: models.BigAutoField = models.BigAutoField(primary_key=True) cohort: models.ForeignKey = models.ForeignKey("Cohort", on_delete=models.CASCADE) person: models.ForeignKey = models.ForeignKey("Person", on_delete=models.CASCADE) class Meta: indexes = [ models.Index(fields=["cohort_id", "person_id"]), ]
posthog/models/cohort.py
import json from typing import Any, Dict, List, Optional from dateutil.relativedelta import relativedelta from django.contrib.postgres.fields import JSONField from django.core.exceptions import EmptyResultSet from django.db import connection, models, transaction from django.db.models import Q from django.db.models.expressions import F from django.utils import timezone from sentry_sdk import capture_exception from posthog.ee import is_ee_enabled from .action import Action from .event import Event from .filters import Filter from .person import Person DELETE_QUERY = """ DELETE FROM "posthog_cohortpeople" WHERE "cohort_id" = {cohort_id}; """ UPDATE_QUERY = """ INSERT INTO "posthog_cohortpeople" ("person_id", "cohort_id") {values_query} ON CONFLICT DO NOTHING """ class Group(object): def __init__( self, properties: Optional[Dict[str, Any]] = None, action_id: Optional[int] = None, days: Optional[int] = None, ): if not properties and not action_id: raise ValueError("Cohort group needs properties or action_id") self.properties = properties self.action_id = action_id self.days = days class CohortManager(models.Manager): def create(self, *args: Any, **kwargs: Any): if kwargs.get("groups"): kwargs["groups"] = [Group(**group).__dict__ for group in kwargs["groups"]] cohort = super().create(*args, **kwargs) return cohort class Cohort(models.Model): name: models.CharField = models.CharField(max_length=400, null=True, blank=True) team: models.ForeignKey = models.ForeignKey("Team", on_delete=models.CASCADE) deleted: models.BooleanField = models.BooleanField(default=False) groups: JSONField = JSONField(default=list) people: models.ManyToManyField = models.ManyToManyField("Person", through="CohortPeople") created_by: models.ForeignKey = models.ForeignKey("User", on_delete=models.SET_NULL, blank=True, null=True) created_at: models.DateTimeField = models.DateTimeField(default=timezone.now, blank=True, null=True) is_calculating: models.BooleanField = models.BooleanField(default=False) 
last_calculation: models.DateTimeField = models.DateTimeField(blank=True, null=True) errors_calculating: models.IntegerField = models.IntegerField(default=0) is_static: models.BooleanField = models.BooleanField(default=False) objects = CohortManager() def get_analytics_metadata(self): action_groups_count: int = 0 properties_groups_count: int = 0 for group in self.groups: action_groups_count += 1 if group.get("action_id") else 0 properties_groups_count += 1 if group.get("properties") else 0 return { "name_length": len(self.name) if self.name else 0, "person_count_precalc": self.people.count(), "groups_count": len(self.groups), "action_groups_count": action_groups_count, "properties_groups_count": properties_groups_count, "deleted": self.deleted, } def calculate_people(self, use_clickhouse=is_ee_enabled()): if self.is_static: return try: if not use_clickhouse: self.is_calculating = True self.save() persons_query = self._clickhouse_persons_query() if use_clickhouse else self._postgres_persons_query() try: sql, params = persons_query.distinct("pk").only("pk").query.sql_with_params() except EmptyResultSet: query = DELETE_QUERY.format(cohort_id=self.pk) params = {} else: query = "{}{}".format(DELETE_QUERY, UPDATE_QUERY).format( cohort_id=self.pk, values_query=sql.replace('FROM "posthog_person"', ', {} FROM "posthog_person"'.format(self.pk), 1,), ) cursor = connection.cursor() with transaction.atomic(): cursor.execute(query, params) self.is_calculating = False self.last_calculation = timezone.now() self.errors_calculating = 0 self.save() except Exception: self.is_calculating = False self.errors_calculating = F("errors_calculating") + 1 self.save() capture_exception() def insert_users_by_list(self, items: List[str]) -> None: """ Items can be distinct_id or email """ batchsize = 1000 use_clickhouse = is_ee_enabled() if use_clickhouse: from ee.clickhouse.models.cohort import insert_static_cohort try: cursor = connection.cursor() for i in range(0, len(items), batchsize): 
batch = items[i : i + batchsize] persons_query = ( Person.objects.filter(team_id=self.team_id) .filter(Q(persondistinctid__team_id=self.team_id, persondistinctid__distinct_id__in=batch)) .exclude(cohort__id=self.id) ) if use_clickhouse: insert_static_cohort([p for p in persons_query.values_list("uuid", flat=True)], self.pk, self.team) sql, params = persons_query.distinct("pk").only("pk").query.sql_with_params() query = UPDATE_QUERY.format( cohort_id=self.pk, values_query=sql.replace('FROM "posthog_person"', ', {} FROM "posthog_person"'.format(self.pk), 1,), ) cursor.execute(query, params) self.is_calculating = False self.last_calculation = timezone.now() self.errors_calculating = 0 self.save() except Exception: self.is_calculating = False self.errors_calculating = F("errors_calculating") + 1 self.save() capture_exception() def __str__(self): return self.name def _clickhouse_persons_query(self): from ee.clickhouse.models.cohort import get_person_ids_by_cohort_id uuids = get_person_ids_by_cohort_id(team=self.team, cohort_id=self.pk) return Person.objects.filter(uuid__in=uuids, team=self.team) def _postgres_persons_query(self): return Person.objects.filter(self._people_filter(), team=self.team) def _people_filter(self, extra_filter=None): from posthog.queries.base import properties_to_Q filters = Q() for group in self.groups: if group.get("action_id"): action = Action.objects.get(pk=group["action_id"], team_id=self.team_id) events = ( Event.objects.filter_by_action(action) .filter( team_id=self.team_id, **( {"timestamp__gt": timezone.now() - relativedelta(days=int(group["days"]))} if group.get("days") else {} ), **(extra_filter if extra_filter else {}) ) .order_by("distinct_id") .distinct("distinct_id") .values("distinct_id") ) filters |= Q(persondistinctid__distinct_id__in=events) elif group.get("properties"): filter = Filter(data=group) filters |= Q(properties_to_Q(filter.properties, team_id=self.team_id, is_person_query=True)) return filters class 
CohortPeople(models.Model): id: models.BigAutoField = models.BigAutoField(primary_key=True) cohort: models.ForeignKey = models.ForeignKey("Cohort", on_delete=models.CASCADE) person: models.ForeignKey = models.ForeignKey("Person", on_delete=models.CASCADE) class Meta: indexes = [ models.Index(fields=["cohort_id", "person_id"]), ]
0.699049
0.111676
import analyzer_lib.utils.utils as u import analyzer_lib.data_manipulation.tables as t from pyspark.sql.types import StructType, StructField, StringType, IntegerType, LongType, DecimalType, DoubleType, FloatType from pyspark.sql import DataFrame, Row from pyspark.sql import functions as F import configparser import sys # read information from configs config = configparser.ConfigParser() config.read('conf.ini') file_dir = config['local']['file_dir'] company_relations_file = config['local']['company_relations_file'] land_ownership_file = config['local']['land_ownership_file'] hierarchical_structure_file_name = config['local']['hierarchical_structure_file_name'] # define schema for input files cr_schema = StructType([ StructField("company_id", StringType(), True), StructField("name", StringType(), True), StructField("parent", StringType(), True) ]) lo_schema = StructType([ StructField("land_id", StringType(), True), StructField("company_id", StringType(), True) ]) hierarchical_structure_schema = StructType([ StructField("company_id", StringType(), True), StructField("name", StringType(), True), StructField("description", StringType(), True), StructField("hierarchical_structure", StringType(), True) ]) def run_with_rebuild(arg_file_dir: str, arg_company_relations_file: str, arg_cr_schema: StructType, arg_land_ownership_file:str, arg_lo_schema: StructType, arg_hierarchical_structure_file_name) -> DataFrame: # READ DFs pathcr = f"{arg_file_dir}/{arg_company_relations_file}" pathlo = f"{arg_file_dir}/{arg_land_ownership_file}" df_cr = u.load_df(arg_cr_schema, pathcr)\ .select(F.col("company_id").alias("company_id_cr"), F.col("name"), F.col("parent")) df_lo = u.load_df(arg_lo_schema, pathlo)\ .select(F.col("land_id"), F.col("company_id").alias("company_id_lo")) # ENRICH DFs WITH MORE INFORMATION parent_df = t.create_df_with_parent_child_lists(df_cr) company_land_count = t.create_df_with_land_parcels_ownership_count(df_lo) company_df = 
t.create_df_with_full_description(df_cr, company_land_count) data_frame = parent_df list_of_roots = None # GET THE LIST OF ROOTS try: list_of_roots = data_frame\ .where( (F.col("parent")=="") | (F.col("parent").isNull()) )\ .select(F.col("company_list"))\ .first()[0] except Exception as e: print(e) # BUILD HIERARICHCAL STRUCTURE FOR EACH ROOT - TOP DOWN if list_of_roots is not None: company_hierarchical_structure_dict = u.build_list_of_dict_with_hierarchical_structure_for_each_root(data_frame, list_of_roots) hierarchical_structure_df = u.create_company_hierarchical_structure_df(company_hierarchical_structure_dict) else: print(f"no roots in the list of roots in the df: {data_frame}") # BUILD FINAL ENRICHED TABLE df_total = company_df.join(hierarchical_structure_df, company_df.company_id==hierarchical_structure_df.id, how="left")\ .drop(F.col("parent_2"))\ .drop(F.col("id")) u.write_df(df_total, arg_file_dir, arg_hierarchical_structure_file_name) return df_total if __name__ == '__main__': # example python main.py CR995643170992 rebuild if len(sys.argv) > 2 and sys.argv[2] == "rebuild": print("building of hierarchical_structure_file_name has started and may take few minutes ... ") df = run_with_rebuild(file_dir, company_relations_file, cr_schema, land_ownership_file, lo_schema, hierarchical_structure_file_name) print("building of hierarchical_structure_file_name has finished") u.print_dict_for_company_id(df, sys.argv[1]) elif len(sys.argv) == 2: df = u.load_df(hierarchical_structure_schema, f"{file_dir}/{hierarchical_structure_file_name}") u.print_dict_for_company_id(df, sys.argv[1])
src/main.py
import analyzer_lib.utils.utils as u import analyzer_lib.data_manipulation.tables as t from pyspark.sql.types import StructType, StructField, StringType, IntegerType, LongType, DecimalType, DoubleType, FloatType from pyspark.sql import DataFrame, Row from pyspark.sql import functions as F import configparser import sys # read information from configs config = configparser.ConfigParser() config.read('conf.ini') file_dir = config['local']['file_dir'] company_relations_file = config['local']['company_relations_file'] land_ownership_file = config['local']['land_ownership_file'] hierarchical_structure_file_name = config['local']['hierarchical_structure_file_name'] # define schema for input files cr_schema = StructType([ StructField("company_id", StringType(), True), StructField("name", StringType(), True), StructField("parent", StringType(), True) ]) lo_schema = StructType([ StructField("land_id", StringType(), True), StructField("company_id", StringType(), True) ]) hierarchical_structure_schema = StructType([ StructField("company_id", StringType(), True), StructField("name", StringType(), True), StructField("description", StringType(), True), StructField("hierarchical_structure", StringType(), True) ]) def run_with_rebuild(arg_file_dir: str, arg_company_relations_file: str, arg_cr_schema: StructType, arg_land_ownership_file:str, arg_lo_schema: StructType, arg_hierarchical_structure_file_name) -> DataFrame: # READ DFs pathcr = f"{arg_file_dir}/{arg_company_relations_file}" pathlo = f"{arg_file_dir}/{arg_land_ownership_file}" df_cr = u.load_df(arg_cr_schema, pathcr)\ .select(F.col("company_id").alias("company_id_cr"), F.col("name"), F.col("parent")) df_lo = u.load_df(arg_lo_schema, pathlo)\ .select(F.col("land_id"), F.col("company_id").alias("company_id_lo")) # ENRICH DFs WITH MORE INFORMATION parent_df = t.create_df_with_parent_child_lists(df_cr) company_land_count = t.create_df_with_land_parcels_ownership_count(df_lo) company_df = 
t.create_df_with_full_description(df_cr, company_land_count) data_frame = parent_df list_of_roots = None # GET THE LIST OF ROOTS try: list_of_roots = data_frame\ .where( (F.col("parent")=="") | (F.col("parent").isNull()) )\ .select(F.col("company_list"))\ .first()[0] except Exception as e: print(e) # BUILD HIERARICHCAL STRUCTURE FOR EACH ROOT - TOP DOWN if list_of_roots is not None: company_hierarchical_structure_dict = u.build_list_of_dict_with_hierarchical_structure_for_each_root(data_frame, list_of_roots) hierarchical_structure_df = u.create_company_hierarchical_structure_df(company_hierarchical_structure_dict) else: print(f"no roots in the list of roots in the df: {data_frame}") # BUILD FINAL ENRICHED TABLE df_total = company_df.join(hierarchical_structure_df, company_df.company_id==hierarchical_structure_df.id, how="left")\ .drop(F.col("parent_2"))\ .drop(F.col("id")) u.write_df(df_total, arg_file_dir, arg_hierarchical_structure_file_name) return df_total if __name__ == '__main__': # example python main.py CR995643170992 rebuild if len(sys.argv) > 2 and sys.argv[2] == "rebuild": print("building of hierarchical_structure_file_name has started and may take few minutes ... ") df = run_with_rebuild(file_dir, company_relations_file, cr_schema, land_ownership_file, lo_schema, hierarchical_structure_file_name) print("building of hierarchical_structure_file_name has finished") u.print_dict_for_company_id(df, sys.argv[1]) elif len(sys.argv) == 2: df = u.load_df(hierarchical_structure_schema, f"{file_dir}/{hierarchical_structure_file_name}") u.print_dict_for_company_id(df, sys.argv[1])
0.313945
0.268654
from __future__ import annotations from collections import UserDict, defaultdict from typing import ( Callable, ClassVar, Dict, Generic, Iterable, Mapping, Optional, Sequence, Set, Type, TypeVar, Union, ) K = TypeVar("K") V = TypeVar("V") class Bag(UserDict, Generic[K, V]): """ A bag is essentially a wrapper of `defaultdict(set)` with the following additional functionality: `self[key] = item` is equivalent to `self[key].add(item)` """ data: Dict[K, Set[V]] def __init__(self) -> None: self.data = defaultdict(set) def bag(self) -> Dict[K, Set[V]]: return self.data def __setitem__(self, key: K, item: V) -> None: return self.data[key].add(item) IndexFactory = Callable[[], Mapping[K, Iterable[V]]] class BidirectionalIndex(UserDict, Generic[K, V]): """ BidirectionalIndex has two indexing system: forward index and inverted index. For example, consider a list of documents. Forward index means finding the set of words from a document while inverted index means finding the set of documents containing that word. Notes -------- Only __getitem__ and __setitem__ are implemented in this base class. Additional functionalities should be provided by subclasses. 
See Also -------- [Inverted Index](https://en.wikipedia.org/wiki/Inverted_index) """ DEFAULT_FORWARD_INDEX_FACTORY: ClassVar[Type[Bag[K, V]]] = Bag DEFAULT_INVERTED_INDEX_FACTORY: ClassVar[Type[Bag[K, V]]] = Bag def __init__( self, forward_index_factory: Optional[IndexFactory[K, V]] = None, inverted_index_factory: Optional[IndexFactory[V, K]] = None, ) -> None: self.__init_index(forward_index_factory, inverted_index_factory) def __init_index( self, forward_index_factory: Optional[IndexFactory[K, V]] = None, inverted_index_factory: Optional[IndexFactory[V, K]] = None, ) -> None: if forward_index_factory is None: forward_index_factory = self.DEFAULT_FORWARD_INDEX_FACTORY if inverted_index_factory is None: inverted_index_factory = self.DEFAULT_INVERTED_INDEX_FACTORY self.data = forward_index_factory() self.inverted_index = inverted_index_factory() @property def forward_index(self) -> Mapping[K, V]: return self.data def __getitem__(self, key: Union[K, V]) -> Union[Sequence[V], Sequence[K]]: try: return self.forward_index[key] except KeyError: return self.inverted_index[key] def __setitem__(self, key: K, item: V) -> None: super().__setitem__(key, item) self.inverted_index[item] = key
streamlined/common/data_structures.py
from __future__ import annotations from collections import UserDict, defaultdict from typing import ( Callable, ClassVar, Dict, Generic, Iterable, Mapping, Optional, Sequence, Set, Type, TypeVar, Union, ) K = TypeVar("K") V = TypeVar("V") class Bag(UserDict, Generic[K, V]): """ A bag is essentially a wrapper of `defaultdict(set)` with the following additional functionality: `self[key] = item` is equivalent to `self[key].add(item)` """ data: Dict[K, Set[V]] def __init__(self) -> None: self.data = defaultdict(set) def bag(self) -> Dict[K, Set[V]]: return self.data def __setitem__(self, key: K, item: V) -> None: return self.data[key].add(item) IndexFactory = Callable[[], Mapping[K, Iterable[V]]] class BidirectionalIndex(UserDict, Generic[K, V]): """ BidirectionalIndex has two indexing system: forward index and inverted index. For example, consider a list of documents. Forward index means finding the set of words from a document while inverted index means finding the set of documents containing that word. Notes -------- Only __getitem__ and __setitem__ are implemented in this base class. Additional functionalities should be provided by subclasses. 
See Also -------- [Inverted Index](https://en.wikipedia.org/wiki/Inverted_index) """ DEFAULT_FORWARD_INDEX_FACTORY: ClassVar[Type[Bag[K, V]]] = Bag DEFAULT_INVERTED_INDEX_FACTORY: ClassVar[Type[Bag[K, V]]] = Bag def __init__( self, forward_index_factory: Optional[IndexFactory[K, V]] = None, inverted_index_factory: Optional[IndexFactory[V, K]] = None, ) -> None: self.__init_index(forward_index_factory, inverted_index_factory) def __init_index( self, forward_index_factory: Optional[IndexFactory[K, V]] = None, inverted_index_factory: Optional[IndexFactory[V, K]] = None, ) -> None: if forward_index_factory is None: forward_index_factory = self.DEFAULT_FORWARD_INDEX_FACTORY if inverted_index_factory is None: inverted_index_factory = self.DEFAULT_INVERTED_INDEX_FACTORY self.data = forward_index_factory() self.inverted_index = inverted_index_factory() @property def forward_index(self) -> Mapping[K, V]: return self.data def __getitem__(self, key: Union[K, V]) -> Union[Sequence[V], Sequence[K]]: try: return self.forward_index[key] except KeyError: return self.inverted_index[key] def __setitem__(self, key: K, item: V) -> None: super().__setitem__(key, item) self.inverted_index[item] = key
0.918279
0.363139
import numpy as np from functions.my_LLE import My_LLE import matplotlib.pyplot as plt import functions.utils as utils class My_GLLE_DirectSampling: def __init__(self, X, n_neighbors=10, n_components=None, path_save="./", verbosity=0): # X: rows are features and columns are samples self.n_components = n_components self.X = X self.n_samples = self.X.shape[1] self.n_dimensions = self.X.shape[0] self.n_neighbors = n_neighbors self.path_save = path_save self.w_linearReconstruction = None self.Cov_weights_linearReconstruction = None self.mean_weights_linearReconstruction = None self.neighbor_indices = None self.verbosity = verbosity def fit_transform(self, calculate_again=True): if calculate_again: self.stochastic_linear_reconstruction(calculate_again=calculate_again) if self.verbosity >= 1: print("Linear reconstruction is done...") X_transformed = self.linear_embedding() if self.verbosity >= 1: print("Linear embedding is done...") utils.save_variable(variable=X_transformed, name_of_variable="X_transformed", path_to_save=self.path_save) else: if self.verbosity >= 1: print("Loading previous embedding...") X_transformed = utils.load_variable(name_of_variable="X_transformed", path=self.path_save) return X_transformed def generate_again(self, Cov_weights_linearReconstruction=None, mean_weights_linearReconstruction=None): if self.verbosity >= 1: print("Generating a new embedding (unfolding)...") for sample_index in range(self.n_samples): if self.verbosity >= 1 and sample_index % 1000 == 0: if self.verbosity >= 2: print("processing sample {}/{}".format(sample_index,self.n_samples)) if Cov_weights_linearReconstruction is None: cov_w = self.Cov_weights_linearReconstruction[:, :, sample_index] else: cov_w = Cov_weights_linearReconstruction[:, :, sample_index] if mean_weights_linearReconstruction is None: mean_w = self.mean_weights_linearReconstruction[:, sample_index] else: mean_w = mean_weights_linearReconstruction[:, sample_index] #### sampling weights: 
self.w_linearReconstruction[sample_index, :] = np.random.multivariate_normal(mean=mean_w.ravel(), cov=cov_w, size=1) X_transformed = self.linear_embedding() return X_transformed def stochastic_linear_reconstruction(self, calculate_again=True): if calculate_again: my_LLE = My_LLE(X=self.X, n_neighbors=self.n_neighbors, n_components=self.n_components) Y_LLE = my_LLE.fit_transform() w_LLE = (my_LLE.w_linearReconstruction).T self.neighbor_indices = my_LLE.neighbor_indices # Phi_ = np.eye(self.n_neighbors) * 1e-10 Phi_ = 0 self.w_linearReconstruction = np.zeros((self.n_samples, self.n_neighbors)) self.mean_weights_linearReconstruction = np.zeros((self.n_neighbors, self.n_samples)) self.Cov_weights_linearReconstruction = np.zeros((self.n_neighbors, self.n_neighbors, self.n_samples)) for sample_index in range(self.n_samples): self.Cov_weights_linearReconstruction[:, :, sample_index] = np.eye(self.n_neighbors) for sample_index in range(self.n_samples): if self.verbosity >= 2 and sample_index % 1000 == 0: print("processing sample {}/{}".format(sample_index,self.n_samples)) neighbor_indices_of_this_sample = self.neighbor_indices[sample_index, :].astype(int) X_neighbors = self.X[:, neighbor_indices_of_this_sample] Y_neighbors = Y_LLE[:, neighbor_indices_of_this_sample] #### sampling w: cov_w = np.linalg.inv( (X_neighbors.T @ X_neighbors) + (Y_neighbors.T @ Y_neighbors) + Phi_ ) # mean_w = cov_w @ ( (X_neighbors.T @ x_i) + (Y_neighbors.T @ y) ) mean_w = w_LLE[:, sample_index] self.w_linearReconstruction[sample_index, :] = np.random.multivariate_normal(mean=mean_w.ravel(), cov=cov_w, size=1) self.Cov_weights_linearReconstruction[:, :, sample_index] = cov_w self.mean_weights_linearReconstruction[:, sample_index] = mean_w.ravel() utils.save_variable(variable=self.w_linearReconstruction, name_of_variable="w_linearReconstruction", path_to_save=self.path_save) utils.save_variable(variable=self.Cov_weights_linearReconstruction, name_of_variable="Cov_weights_linearReconstruction", 
path_to_save=self.path_save) utils.save_variable(variable=self.mean_weights_linearReconstruction, name_of_variable="mean_weights_linearReconstruction", path_to_save=self.path_save) utils.save_variable(variable=self.neighbor_indices, name_of_variable="neighbor_indices", path_to_save=self.path_save) else: self.w_linearReconstruction = utils.load_variable(name_of_variable="w_linearReconstruction", path=self.path_save) self.Cov_weights_linearReconstruction = utils.load_variable(name_of_variable="Cov_weights_linearReconstruction", path=self.path_save) self.mean_weights_linearReconstruction = utils.load_variable(name_of_variable="mean_weights_linearReconstruction", path=self.path_save) self.neighbor_indices = utils.load_variable(name_of_variable="neighbor_indices", path=self.path_save) def linear_embedding(self): self.W_linearEmbedding = np.zeros((self.n_samples, self.n_samples)) for sample_index in range(self.n_samples): neighbor_indices_of_this_sample = self.neighbor_indices[sample_index, :].astype(int) self.W_linearEmbedding[sample_index, neighbor_indices_of_this_sample] = self.w_linearReconstruction[sample_index, :].ravel() temp = np.eye(self.n_samples) - self.W_linearEmbedding M = (temp.T).dot(temp) eig_val, eig_vec = np.linalg.eigh(M) idx = eig_val.argsort() # sort eigenvalues in ascending order (smallest eigenvalue first) eig_val = eig_val[idx] eig_vec = eig_vec[:, idx] if self.n_components is not None: X_transformed = eig_vec[:, 1:self.n_components+1] #--> note that first eigenvalue is zero else: X_transformed = eig_vec[:, 1:] #--> note that first eigenvalue is zero X_transformed = X_transformed.T #--> the obtained Y in Laplacian eigenmap is row-wise vectors, so we transpose it return X_transformed
functions/my_GLLE_DirectSampling.py
import numpy as np from functions.my_LLE import My_LLE import matplotlib.pyplot as plt import functions.utils as utils class My_GLLE_DirectSampling: def __init__(self, X, n_neighbors=10, n_components=None, path_save="./", verbosity=0): # X: rows are features and columns are samples self.n_components = n_components self.X = X self.n_samples = self.X.shape[1] self.n_dimensions = self.X.shape[0] self.n_neighbors = n_neighbors self.path_save = path_save self.w_linearReconstruction = None self.Cov_weights_linearReconstruction = None self.mean_weights_linearReconstruction = None self.neighbor_indices = None self.verbosity = verbosity def fit_transform(self, calculate_again=True): if calculate_again: self.stochastic_linear_reconstruction(calculate_again=calculate_again) if self.verbosity >= 1: print("Linear reconstruction is done...") X_transformed = self.linear_embedding() if self.verbosity >= 1: print("Linear embedding is done...") utils.save_variable(variable=X_transformed, name_of_variable="X_transformed", path_to_save=self.path_save) else: if self.verbosity >= 1: print("Loading previous embedding...") X_transformed = utils.load_variable(name_of_variable="X_transformed", path=self.path_save) return X_transformed def generate_again(self, Cov_weights_linearReconstruction=None, mean_weights_linearReconstruction=None): if self.verbosity >= 1: print("Generating a new embedding (unfolding)...") for sample_index in range(self.n_samples): if self.verbosity >= 1 and sample_index % 1000 == 0: if self.verbosity >= 2: print("processing sample {}/{}".format(sample_index,self.n_samples)) if Cov_weights_linearReconstruction is None: cov_w = self.Cov_weights_linearReconstruction[:, :, sample_index] else: cov_w = Cov_weights_linearReconstruction[:, :, sample_index] if mean_weights_linearReconstruction is None: mean_w = self.mean_weights_linearReconstruction[:, sample_index] else: mean_w = mean_weights_linearReconstruction[:, sample_index] #### sampling weights: 
self.w_linearReconstruction[sample_index, :] = np.random.multivariate_normal(mean=mean_w.ravel(), cov=cov_w, size=1) X_transformed = self.linear_embedding() return X_transformed def stochastic_linear_reconstruction(self, calculate_again=True): if calculate_again: my_LLE = My_LLE(X=self.X, n_neighbors=self.n_neighbors, n_components=self.n_components) Y_LLE = my_LLE.fit_transform() w_LLE = (my_LLE.w_linearReconstruction).T self.neighbor_indices = my_LLE.neighbor_indices # Phi_ = np.eye(self.n_neighbors) * 1e-10 Phi_ = 0 self.w_linearReconstruction = np.zeros((self.n_samples, self.n_neighbors)) self.mean_weights_linearReconstruction = np.zeros((self.n_neighbors, self.n_samples)) self.Cov_weights_linearReconstruction = np.zeros((self.n_neighbors, self.n_neighbors, self.n_samples)) for sample_index in range(self.n_samples): self.Cov_weights_linearReconstruction[:, :, sample_index] = np.eye(self.n_neighbors) for sample_index in range(self.n_samples): if self.verbosity >= 2 and sample_index % 1000 == 0: print("processing sample {}/{}".format(sample_index,self.n_samples)) neighbor_indices_of_this_sample = self.neighbor_indices[sample_index, :].astype(int) X_neighbors = self.X[:, neighbor_indices_of_this_sample] Y_neighbors = Y_LLE[:, neighbor_indices_of_this_sample] #### sampling w: cov_w = np.linalg.inv( (X_neighbors.T @ X_neighbors) + (Y_neighbors.T @ Y_neighbors) + Phi_ ) # mean_w = cov_w @ ( (X_neighbors.T @ x_i) + (Y_neighbors.T @ y) ) mean_w = w_LLE[:, sample_index] self.w_linearReconstruction[sample_index, :] = np.random.multivariate_normal(mean=mean_w.ravel(), cov=cov_w, size=1) self.Cov_weights_linearReconstruction[:, :, sample_index] = cov_w self.mean_weights_linearReconstruction[:, sample_index] = mean_w.ravel() utils.save_variable(variable=self.w_linearReconstruction, name_of_variable="w_linearReconstruction", path_to_save=self.path_save) utils.save_variable(variable=self.Cov_weights_linearReconstruction, name_of_variable="Cov_weights_linearReconstruction", 
path_to_save=self.path_save) utils.save_variable(variable=self.mean_weights_linearReconstruction, name_of_variable="mean_weights_linearReconstruction", path_to_save=self.path_save) utils.save_variable(variable=self.neighbor_indices, name_of_variable="neighbor_indices", path_to_save=self.path_save) else: self.w_linearReconstruction = utils.load_variable(name_of_variable="w_linearReconstruction", path=self.path_save) self.Cov_weights_linearReconstruction = utils.load_variable(name_of_variable="Cov_weights_linearReconstruction", path=self.path_save) self.mean_weights_linearReconstruction = utils.load_variable(name_of_variable="mean_weights_linearReconstruction", path=self.path_save) self.neighbor_indices = utils.load_variable(name_of_variable="neighbor_indices", path=self.path_save) def linear_embedding(self): self.W_linearEmbedding = np.zeros((self.n_samples, self.n_samples)) for sample_index in range(self.n_samples): neighbor_indices_of_this_sample = self.neighbor_indices[sample_index, :].astype(int) self.W_linearEmbedding[sample_index, neighbor_indices_of_this_sample] = self.w_linearReconstruction[sample_index, :].ravel() temp = np.eye(self.n_samples) - self.W_linearEmbedding M = (temp.T).dot(temp) eig_val, eig_vec = np.linalg.eigh(M) idx = eig_val.argsort() # sort eigenvalues in ascending order (smallest eigenvalue first) eig_val = eig_val[idx] eig_vec = eig_vec[:, idx] if self.n_components is not None: X_transformed = eig_vec[:, 1:self.n_components+1] #--> note that first eigenvalue is zero else: X_transformed = eig_vec[:, 1:] #--> note that first eigenvalue is zero X_transformed = X_transformed.T #--> the obtained Y in Laplacian eigenmap is row-wise vectors, so we transpose it return X_transformed
0.68056
0.521349
import sys sys.path.append("../python") import numpy as np import matplotlib.pyplot as plt import dmfortfactor as dm dresfile = '../data/C/c12Nmax8chi20hw' hofrequencies = np.arange(15., 25., 1) operators = [1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] for operator in operators: print("Operator-%i"%operator) first=True plt.figure(1) plt.clf() plt.figure(2) plt.clf() for i, hofrequency in enumerate(hofrequencies): rlabel = 'run_%s_%s'%(operator,hofrequency) control_dict = { "hofrequency" : hofrequency, "wimpmass" : 500.0, "vearth" : 232.0, "maxwellv0" : 220.0, "vescape" : 550.0, "dmdens" : 0.3, "ntscale" : 2500.0 } cn = np.zeros(15) cn[operator-1] = 1.0 E, R = dm.EventrateSpectra( Z = 6, N = 6, dres = dresfile, controlwords = control_dict, cn = cn, exec_path='../bin/dmfortfactor') if (first): first=False R0 = R plt.figure(1) plt.plot(E,R,label="hw = %s MeV"%hofrequency) plt.figure(2) plt.plot(E,abs(R - R0)/R0,label="hw = %s MeV"%hofrequency) plt.figure(1) plt.title("C-12, 500 GeV WIMP, Op: %s, neutron 4.8E-4"%operator) plt.xlabel('$E_{recoil}$ (keV)') plt.ylabel('Events/MeV') plt.legend(loc=3) plt.xscale('log') if (not all(R==0)): plt.yscale('log') zeros=False else: zeros=True plt.savefig('c12-hw-o%s.pdf'%operator) if (zeros): continue plt.figure(2) plt.title("C-12, 500 GeV WIMP, Op: %s, neutron 4.8E-4"%operator) plt.xlabel('$E_{recoil}$ (keV)') plt.ylabel('Relative error w.r.t. hw=%s'%hofrequencies[0]) plt.legend(loc=2) plt.xscale('log') if (not all(R==0)):plt.yscale('log') plt.savefig('c12-hw-o%s-relerr.pdf'%operator)
examples/exampleHOFrequency.py
import sys sys.path.append("../python") import numpy as np import matplotlib.pyplot as plt import dmfortfactor as dm dresfile = '../data/C/c12Nmax8chi20hw' hofrequencies = np.arange(15., 25., 1) operators = [1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] for operator in operators: print("Operator-%i"%operator) first=True plt.figure(1) plt.clf() plt.figure(2) plt.clf() for i, hofrequency in enumerate(hofrequencies): rlabel = 'run_%s_%s'%(operator,hofrequency) control_dict = { "hofrequency" : hofrequency, "wimpmass" : 500.0, "vearth" : 232.0, "maxwellv0" : 220.0, "vescape" : 550.0, "dmdens" : 0.3, "ntscale" : 2500.0 } cn = np.zeros(15) cn[operator-1] = 1.0 E, R = dm.EventrateSpectra( Z = 6, N = 6, dres = dresfile, controlwords = control_dict, cn = cn, exec_path='../bin/dmfortfactor') if (first): first=False R0 = R plt.figure(1) plt.plot(E,R,label="hw = %s MeV"%hofrequency) plt.figure(2) plt.plot(E,abs(R - R0)/R0,label="hw = %s MeV"%hofrequency) plt.figure(1) plt.title("C-12, 500 GeV WIMP, Op: %s, neutron 4.8E-4"%operator) plt.xlabel('$E_{recoil}$ (keV)') plt.ylabel('Events/MeV') plt.legend(loc=3) plt.xscale('log') if (not all(R==0)): plt.yscale('log') zeros=False else: zeros=True plt.savefig('c12-hw-o%s.pdf'%operator) if (zeros): continue plt.figure(2) plt.title("C-12, 500 GeV WIMP, Op: %s, neutron 4.8E-4"%operator) plt.xlabel('$E_{recoil}$ (keV)') plt.ylabel('Relative error w.r.t. hw=%s'%hofrequencies[0]) plt.legend(loc=2) plt.xscale('log') if (not all(R==0)):plt.yscale('log') plt.savefig('c12-hw-o%s-relerr.pdf'%operator)
0.173884
0.280188
import serial import sys import time import serial.tools.list_ports # declare once ser = serial.Serial() def readserial(ser, numlines): readcount = 0 readmore = True while readmore: rcv1 = "" rcv1 = ser.readline() words = rcv1.split() print rcv1 print words readcount = readcount + 1 if readcount > numlines: readcount = 0 readmore = False return 'read in ' + str(numlines) + 'lines' def cmd (ser, cmdlist): for cmd in rtccmd: ser.write(rtccmd) print rtccmd time.sleep(2) def main(): print 'initializing...' serPort = "" totalPorts = 0 count = 0 eggComPort = "" eggCount = 0 eggNotFound = True print 'Ready!' while eggNotFound: # Find Live Ports ports = list(serial.tools.list_ports.comports()) totalPorts = len(ports) print "there are " + str(totalPorts) + " com ports available" for p in ports: print p # This causes each port's information to be printed out. # To search this p data, use p[2]. if "FTDI" in p[2]: # Looks for "FTDI" in P[2]. print "there is an air quality egg on " + p[0] eggComPort = p[0] print "Found AQE on " + eggComPort eggNotFound = False #note- as soon as any egg is found, loop ends. eggCount = eggCount + 1 if count == totalPorts-1: if eggNotFound: print "egg not found!" time.sleep(.5) else: print "There were " + str(eggCount) + " eggs found." #count = totalPorts #kick out of this while loop and read ports again #sys.exit() # Terminates Script. #count = count + 1 time.sleep(2) # pause before looping again# check ports again in 5 seconds time.sleep(2) # Gives user 5 seconds to view Port information -- can be changed/removed. # Set Port ser = serial.Serial(eggComPort, 115200, timeout=30) # Put in your speed and timeout value. # This begins the opening and printout of data from the Arduino. ser.close() # In case the port is already open this closes it. ser.open() # Reopen the port. 
ser.flushInput() ser.flushOutput() print "connected to port " + eggComPort #CO2 egg has a 16 line header readserial(ser, 16) cfgcmd= ['aqe\n', 'use ntp\n', 'tz_off -4\n', 'backup tz\n', 'ssid WickedDevice\n', 'pwd <PASSWORD>', 'exit\n'] readserial(ser, 102) rtccmd= ['restore defaults\n', 'use ntp\n', 'tz_off -4\n', 'backup tz\n', 'ssid WickedDevice\n', 'pwd <PASSWORD>', 'exit\n'] print 'closing serial port...' ser.close() # In case the port is already open this closes it. ser.open() # Reopen the port. readserial(ser, 200) if __name__ == "__main__": main()
eggSearcher06.py
import serial import sys import time import serial.tools.list_ports # declare once ser = serial.Serial() def readserial(ser, numlines): readcount = 0 readmore = True while readmore: rcv1 = "" rcv1 = ser.readline() words = rcv1.split() print rcv1 print words readcount = readcount + 1 if readcount > numlines: readcount = 0 readmore = False return 'read in ' + str(numlines) + 'lines' def cmd (ser, cmdlist): for cmd in rtccmd: ser.write(rtccmd) print rtccmd time.sleep(2) def main(): print 'initializing...' serPort = "" totalPorts = 0 count = 0 eggComPort = "" eggCount = 0 eggNotFound = True print 'Ready!' while eggNotFound: # Find Live Ports ports = list(serial.tools.list_ports.comports()) totalPorts = len(ports) print "there are " + str(totalPorts) + " com ports available" for p in ports: print p # This causes each port's information to be printed out. # To search this p data, use p[2]. if "FTDI" in p[2]: # Looks for "FTDI" in P[2]. print "there is an air quality egg on " + p[0] eggComPort = p[0] print "Found AQE on " + eggComPort eggNotFound = False #note- as soon as any egg is found, loop ends. eggCount = eggCount + 1 if count == totalPorts-1: if eggNotFound: print "egg not found!" time.sleep(.5) else: print "There were " + str(eggCount) + " eggs found." #count = totalPorts #kick out of this while loop and read ports again #sys.exit() # Terminates Script. #count = count + 1 time.sleep(2) # pause before looping again# check ports again in 5 seconds time.sleep(2) # Gives user 5 seconds to view Port information -- can be changed/removed. # Set Port ser = serial.Serial(eggComPort, 115200, timeout=30) # Put in your speed and timeout value. # This begins the opening and printout of data from the Arduino. ser.close() # In case the port is already open this closes it. ser.open() # Reopen the port. 
ser.flushInput() ser.flushOutput() print "connected to port " + eggComPort #CO2 egg has a 16 line header readserial(ser, 16) cfgcmd= ['aqe\n', 'use ntp\n', 'tz_off -4\n', 'backup tz\n', 'ssid WickedDevice\n', 'pwd <PASSWORD>', 'exit\n'] readserial(ser, 102) rtccmd= ['restore defaults\n', 'use ntp\n', 'tz_off -4\n', 'backup tz\n', 'ssid WickedDevice\n', 'pwd <PASSWORD>', 'exit\n'] print 'closing serial port...' ser.close() # In case the port is already open this closes it. ser.open() # Reopen the port. readserial(ser, 200) if __name__ == "__main__": main()
0.125923
0.086439
from __future__ import division import json import mailchimp3 from mailchimp3 import MailChimp from user_login_credentials import user_name from user_login_credentials import api_key class single_report: def __init__(self, report_data): self.campaign_id = report_data['id'] self.subject_line = report_data['subject_line'] self.list_name = report_data['list_name'] self.send_time = report_data['send_time'] self.total_sent = report_data['emails_sent'] self.total_bounces = report_data['bounces']['hard_bounces'] + report_data['bounces']['soft_bounces'] + report_data['bounces']['syntax_errors'] self.hard_bounces = report_data['bounces']['hard_bounces'] self.soft_bounces = report_data['bounces']['soft_bounces'] self.total_delivered = self.total_sent - self.total_bounces self.unsubscribes = report_data['unsubscribed'] self.total_opens = report_data['opens']['opens_total'] self.unique_opens = report_data['opens']['unique_opens'] self.total_clicks = report_data['clicks']['clicks_total'] self.unique_clicks = report_data['clicks']['unique_clicks'] self.send_date = self.send_time[0:10] self.delivery_rate = str(self.total_delivered / self.total_sent * 100) + "%" self.open_rate = str("%.2f" % (report_data['opens']['open_rate'] * 100)) + "%" self.click_rate = str("%.2f" % (report_data['clicks']['click_rate'] * 100)) + "%" self.clickthru_rate = str("%.2f" % (self.total_clicks / self.total_delivered * 100)) + "%" #self.click_report = "" def reports_result(date_range, campaign_name_search): client = MailChimp(user_name, api_key) all_json_data = client.reports.all(get_all=True) all_reports = all_json_data['reports'] reports_in_daterange = all_reports#[0:50] # TODO: create new method find_index_for_date_range to handle a simple string date range input and provide the right index number for this filter matching_reports = [reports for reports in reports_in_daterange if campaign_name_search in reports["campaign_title"]] return matching_reports """ def get_click_report(campaign_id): client = 
MailChimp(user_name, api_key) json_data = client.reports.click_details.all(campaign_id=campaign_id, get_all=False) click_report = json_data['urls_clicked'] return click_report """ class click_report_object(): def __init__(self, c_id): client = MailChimp(user_name, api_key) json_data = client.reports.click_details.all(campaign_id=c_id, get_all=False) links_clicked = json_data['urls_clicked'] self.url_1 = links_clicked[0]["url"] self.total_clicks_1 = links_clicked[0]["total_clicks"] self.total_click_percent_1 = links_clicked[0]["click_percentage"] self.unique_clicks_1 = links_clicked[0]["unique_clicks"] self.unique_click_percent_1 = links_clicked[0]["unique_click_percentage"] self.url_2 = links_clicked[1]["url"] self.total_clicks_2 = links_clicked[1]["total_clicks"] self.total_click_percent_2 = links_clicked[1]["click_percentage"] self.unique_clicks_2 = links_clicked[1]["unique_clicks"] self.unique_click_percent_2 = links_clicked[1]["unique_click_percentage"] self.url_3 = links_clicked[2]["url"] self.total_clicks_3 = links_clicked[2]["total_clicks"] self.total_click_percent_3 = links_clicked[2]["click_percentage"] self.unique_clicks_3 = links_clicked[2]["unique_clicks"] self.unique_click_percent_3 = links_clicked[2]["unique_click_percentage"] self.url_4 = links_clicked[3]["url"] self.total_clicks_4 = links_clicked[3]["total_clicks"] self.total_click_percent_4 = links_clicked[3]["click_percentage"] self.unique_clicks_4 = links_clicked[3]["unique_clicks"] self.unique_click_percent_4 = links_clicked[3]["unique_click_percentage"] self.url_5 = links_clicked[4]["url"] self.total_clicks_5 = links_clicked[4]["total_clicks"] self.total_click_percent_5 = links_clicked[4]["click_percentage"] self.unique_clicks_5 = links_clicked[4]["unique_clicks"] self.unique_click_percent_5 = links_clicked[4]["unique_click_percentage"] self.url_6 = links_clicked[5]["url"] self.total_clicks_6 = links_clicked[5]["total_clicks"] self.total_click_percent_6 = links_clicked[5]["click_percentage"] 
self.unique_clicks_6 = links_clicked[5]["unique_clicks"] self.unique_click_percent_6 = links_clicked[5]["unique_click_percentage"]
mailchimp_api_wrapper.py
from __future__ import division import json import mailchimp3 from mailchimp3 import MailChimp from user_login_credentials import user_name from user_login_credentials import api_key class single_report: def __init__(self, report_data): self.campaign_id = report_data['id'] self.subject_line = report_data['subject_line'] self.list_name = report_data['list_name'] self.send_time = report_data['send_time'] self.total_sent = report_data['emails_sent'] self.total_bounces = report_data['bounces']['hard_bounces'] + report_data['bounces']['soft_bounces'] + report_data['bounces']['syntax_errors'] self.hard_bounces = report_data['bounces']['hard_bounces'] self.soft_bounces = report_data['bounces']['soft_bounces'] self.total_delivered = self.total_sent - self.total_bounces self.unsubscribes = report_data['unsubscribed'] self.total_opens = report_data['opens']['opens_total'] self.unique_opens = report_data['opens']['unique_opens'] self.total_clicks = report_data['clicks']['clicks_total'] self.unique_clicks = report_data['clicks']['unique_clicks'] self.send_date = self.send_time[0:10] self.delivery_rate = str(self.total_delivered / self.total_sent * 100) + "%" self.open_rate = str("%.2f" % (report_data['opens']['open_rate'] * 100)) + "%" self.click_rate = str("%.2f" % (report_data['clicks']['click_rate'] * 100)) + "%" self.clickthru_rate = str("%.2f" % (self.total_clicks / self.total_delivered * 100)) + "%" #self.click_report = "" def reports_result(date_range, campaign_name_search): client = MailChimp(user_name, api_key) all_json_data = client.reports.all(get_all=True) all_reports = all_json_data['reports'] reports_in_daterange = all_reports#[0:50] # TODO: create new method find_index_for_date_range to handle a simple string date range input and provide the right index number for this filter matching_reports = [reports for reports in reports_in_daterange if campaign_name_search in reports["campaign_title"]] return matching_reports """ def get_click_report(campaign_id): client = 
MailChimp(user_name, api_key) json_data = client.reports.click_details.all(campaign_id=campaign_id, get_all=False) click_report = json_data['urls_clicked'] return click_report """ class click_report_object(): def __init__(self, c_id): client = MailChimp(user_name, api_key) json_data = client.reports.click_details.all(campaign_id=c_id, get_all=False) links_clicked = json_data['urls_clicked'] self.url_1 = links_clicked[0]["url"] self.total_clicks_1 = links_clicked[0]["total_clicks"] self.total_click_percent_1 = links_clicked[0]["click_percentage"] self.unique_clicks_1 = links_clicked[0]["unique_clicks"] self.unique_click_percent_1 = links_clicked[0]["unique_click_percentage"] self.url_2 = links_clicked[1]["url"] self.total_clicks_2 = links_clicked[1]["total_clicks"] self.total_click_percent_2 = links_clicked[1]["click_percentage"] self.unique_clicks_2 = links_clicked[1]["unique_clicks"] self.unique_click_percent_2 = links_clicked[1]["unique_click_percentage"] self.url_3 = links_clicked[2]["url"] self.total_clicks_3 = links_clicked[2]["total_clicks"] self.total_click_percent_3 = links_clicked[2]["click_percentage"] self.unique_clicks_3 = links_clicked[2]["unique_clicks"] self.unique_click_percent_3 = links_clicked[2]["unique_click_percentage"] self.url_4 = links_clicked[3]["url"] self.total_clicks_4 = links_clicked[3]["total_clicks"] self.total_click_percent_4 = links_clicked[3]["click_percentage"] self.unique_clicks_4 = links_clicked[3]["unique_clicks"] self.unique_click_percent_4 = links_clicked[3]["unique_click_percentage"] self.url_5 = links_clicked[4]["url"] self.total_clicks_5 = links_clicked[4]["total_clicks"] self.total_click_percent_5 = links_clicked[4]["click_percentage"] self.unique_clicks_5 = links_clicked[4]["unique_clicks"] self.unique_click_percent_5 = links_clicked[4]["unique_click_percentage"] self.url_6 = links_clicked[5]["url"] self.total_clicks_6 = links_clicked[5]["total_clicks"] self.total_click_percent_6 = links_clicked[5]["click_percentage"] 
self.unique_clicks_6 = links_clicked[5]["unique_clicks"] self.unique_click_percent_6 = links_clicked[5]["unique_click_percentage"]
0.183703
0.042305
import json import time from django.db.models import F from django_mysql.models.functions import JSONExtract from scores.models import Score, Round from scores.util import convert_ms_to_minutes def process(score: Score) -> Score: if score.round.challenge_name == 'countme': return process_countme(score) return score def process_countme(score: Score) -> Score: build_code = score.result.get('build', {}).get('code', 0) if build_code != 0: score.state = 'FAILED' score.reason = 'Error in building the image' if score.state == 'FAILED': return score metrics_str = score.result.get('metrics', {}).get('stdout', '{}') metrics_json = json.loads(metrics_str) status_codes = metrics_json.get('status_codes', {}) if len(status_codes) != 1: score.state = 'FAILED' else: count_of_200 = status_codes.get('200', None) if not count_of_200: score.state = 'FAILED' if score.state != 'FAILED': validation_result = metrics_json.get('validation_result', {}) if validation_result.get('status', ) != 'SUCCEEED': score.state = 'FAILED' score.reason = validation_result.get('reason', 'UNKNOWN REASON') if score.state != 'FAILED': p95_latency_ns = metrics_json.get('latencies', {}).get('99th', 999999999) score.main_indicator = p95_latency_ns / 1_000_000.0 return score def prepare_scores_old(challenge_name: str): latest_round = Round.objects.filter(challenge_name=challenge_name, state='FINISHED').last() if latest_round: time_passed_from_last_run = convert_ms_to_minutes((time.time() - latest_round.updated.timestamp()) * 1000) latest_scores = Score.objects.filter(round_id=latest_round.id).order_by( JSONExtract('result', '$.run_result.duration').asc(nulls_last=True) ).all() else: time_passed_from_last_run = 0 latest_scores = [] return latest_scores, time_passed_from_last_run def prepare_scores(challenge_name: str): latest_round = Round.objects.filter(challenge_name=challenge_name, state='FINISHED').last() if latest_round: time_passed_from_last_run = convert_ms_to_minutes((time.time() - 
latest_round.updated.timestamp()) * 1000) latest_scores = Score.objects.filter(round_id=latest_round.id).order_by( F('main_indicator').asc(nulls_last=True) ).all() else: time_passed_from_last_run = 0 latest_scores = [] return latest_scores, time_passed_from_last_run
scores/score_processor.py
import json import time from django.db.models import F from django_mysql.models.functions import JSONExtract from scores.models import Score, Round from scores.util import convert_ms_to_minutes def process(score: Score) -> Score: if score.round.challenge_name == 'countme': return process_countme(score) return score def process_countme(score: Score) -> Score: build_code = score.result.get('build', {}).get('code', 0) if build_code != 0: score.state = 'FAILED' score.reason = 'Error in building the image' if score.state == 'FAILED': return score metrics_str = score.result.get('metrics', {}).get('stdout', '{}') metrics_json = json.loads(metrics_str) status_codes = metrics_json.get('status_codes', {}) if len(status_codes) != 1: score.state = 'FAILED' else: count_of_200 = status_codes.get('200', None) if not count_of_200: score.state = 'FAILED' if score.state != 'FAILED': validation_result = metrics_json.get('validation_result', {}) if validation_result.get('status', ) != 'SUCCEEED': score.state = 'FAILED' score.reason = validation_result.get('reason', 'UNKNOWN REASON') if score.state != 'FAILED': p95_latency_ns = metrics_json.get('latencies', {}).get('99th', 999999999) score.main_indicator = p95_latency_ns / 1_000_000.0 return score def prepare_scores_old(challenge_name: str): latest_round = Round.objects.filter(challenge_name=challenge_name, state='FINISHED').last() if latest_round: time_passed_from_last_run = convert_ms_to_minutes((time.time() - latest_round.updated.timestamp()) * 1000) latest_scores = Score.objects.filter(round_id=latest_round.id).order_by( JSONExtract('result', '$.run_result.duration').asc(nulls_last=True) ).all() else: time_passed_from_last_run = 0 latest_scores = [] return latest_scores, time_passed_from_last_run def prepare_scores(challenge_name: str): latest_round = Round.objects.filter(challenge_name=challenge_name, state='FINISHED').last() if latest_round: time_passed_from_last_run = convert_ms_to_minutes((time.time() - 
latest_round.updated.timestamp()) * 1000) latest_scores = Score.objects.filter(round_id=latest_round.id).order_by( F('main_indicator').asc(nulls_last=True) ).all() else: time_passed_from_last_run = 0 latest_scores = [] return latest_scores, time_passed_from_last_run
0.356671
0.173778
# coding=utf-8 import os import sys from aliyunsdkcore.acs_exception import error_code, error_msg from aliyunsdkcore.acs_exception.exceptions import ClientException from xml.dom.minidom import parse from aliyunsdkcore.profile import location_service """ Region&Endpoint provider module. Created on 6/12/2015 @author: alex """ # endpoint list __endpoints = dict() # load endpoints info from endpoints.xml file and parse to dict. parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) __endpoints_file = os.path.join(parent_dir, 'endpoints.xml') try: DOMTree = parse(__endpoints_file) root = DOMTree.documentElement eps = root.getElementsByTagName('Endpoint') for endpoint in eps: region_list = [] product_list = [] regions = endpoint.getElementsByTagName('RegionId') products = endpoint.getElementsByTagName('Product') for region in regions: region_list.append(region.childNodes[0].nodeValue) for product in products: name_node = product.getElementsByTagName('ProductName')[0] name = name_node.childNodes[0].nodeValue domain_node = product.getElementsByTagName('DomainName')[0] domain = domain_node.childNodes[0].nodeValue product_list.append({name: domain}) __endpoints[endpoint.getAttribute('name')] = dict( regions=region_list, products=product_list) except Exception as ex: raise ClientException( error_code.SDK_MISSING_ENDPOINTS_FILER, error_msg.get_msg('SDK_MISSING_ENDPOINTS_FILER')) def find_product_domain(regionid, prod_name): """ Fetch endpoint url with given region id, product name and endpoint list :param regionid: region id :param product: product name :param endpoints: product list :return: endpoint url """ if regionid is not None and product is not None: for point in __endpoints: point_info = __endpoints.get(point) if regionid in point_info.get('regions'): prod_info = point_info.get('products') for prod in prod_info: if prod_name in prod: return prod.get(prod_name) return None def add_endpoint(product_name, region_id, end_point): modify_point(product_name, 
region_id, end_point) location_service.set_cache(product_name, region_id, end_point) def modify_point(product_name, region_id, end_point): for point in __endpoints: if point == region_id: point_info = __endpoints.get(point) region_list = point_info.get('regions') products = point_info.get('products') if region_id is not None and region_id not in region_list: region_list.append(region_id) if end_point is not None: product_exit = 0 for prod in products: if product_name in prod: prod[product_name] = end_point product_exit = 1 if product_exit == 0: item = dict() item[product_name] = end_point products.append(item) __mdict = dict() __mdict['regions'] = region_list __mdict['products'] = products __endpoints[point] = __mdict return region_list = [] products = [] region_list.append(region_id) item = dict() item[product_name] = end_point products.append(item) __mdict = dict() __mdict['regions'] = region_list __mdict['products'] = products __endpoints[region_id] = __mdict def convert_dict_to_endpointsxml(mdict): regions = list() products = list() for point in mdict: point_info = mdict.get(point) regions = point_info.get('regions') products = point_info.get('products') content = '' prefix = '<?xml version="1.0" encoding="UTF-8"?>\n<Endpoints>\n<Endpoint name="cn-hangzhou">\n' endfix = '</Endpoint>\n</Endpoints>\n' content += prefix content += '<RegionIds>\n' for item in regions: content += '<RegionId>' + item + '</RegionId>\n' content += '</RegionIds>\n' + '<Products>\n' for item in products: content += '<Product>\n' content += '<ProductName>' + list(item.keys())[0] + '</ProductName>\n' content += '<DomainName>' + item[list(item.keys())[0]] + '</DomainName>\n' content += '</Product>\n' content += '</Products>' content += endfix # print content if not os.path.isfile(__endpoints_file): _createFile(__endpoints_file) f = open(__endpoints_file, 'w') try: f.write(''.join(content)) except Exception as e: print(e) print("Please confirm you has use sudo + cmd") finally: f.close() def 
_createFile(filename): namePath = os.path.split(filename)[0] if not os.path.isdir(namePath): os.makedirs(namePath) with os.fdopen(os.open(filename, os.O_WRONLY | os.O_CREAT, 0o600), 'w'): pass if __name__ == '__main__': print(find_product_domain('cn-hangzhou', 'Rds')) modify_point('ecs', 'cn-beijing-2', 'ecs.aliyuncs.com')
aliyunsdkcore/profile/region_provider.py
# coding=utf-8 import os import sys from aliyunsdkcore.acs_exception import error_code, error_msg from aliyunsdkcore.acs_exception.exceptions import ClientException from xml.dom.minidom import parse from aliyunsdkcore.profile import location_service """ Region&Endpoint provider module. Created on 6/12/2015 @author: alex """ # endpoint list __endpoints = dict() # load endpoints info from endpoints.xml file and parse to dict. parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) __endpoints_file = os.path.join(parent_dir, 'endpoints.xml') try: DOMTree = parse(__endpoints_file) root = DOMTree.documentElement eps = root.getElementsByTagName('Endpoint') for endpoint in eps: region_list = [] product_list = [] regions = endpoint.getElementsByTagName('RegionId') products = endpoint.getElementsByTagName('Product') for region in regions: region_list.append(region.childNodes[0].nodeValue) for product in products: name_node = product.getElementsByTagName('ProductName')[0] name = name_node.childNodes[0].nodeValue domain_node = product.getElementsByTagName('DomainName')[0] domain = domain_node.childNodes[0].nodeValue product_list.append({name: domain}) __endpoints[endpoint.getAttribute('name')] = dict( regions=region_list, products=product_list) except Exception as ex: raise ClientException( error_code.SDK_MISSING_ENDPOINTS_FILER, error_msg.get_msg('SDK_MISSING_ENDPOINTS_FILER')) def find_product_domain(regionid, prod_name): """ Fetch endpoint url with given region id, product name and endpoint list :param regionid: region id :param product: product name :param endpoints: product list :return: endpoint url """ if regionid is not None and product is not None: for point in __endpoints: point_info = __endpoints.get(point) if regionid in point_info.get('regions'): prod_info = point_info.get('products') for prod in prod_info: if prod_name in prod: return prod.get(prod_name) return None def add_endpoint(product_name, region_id, end_point): modify_point(product_name, 
region_id, end_point) location_service.set_cache(product_name, region_id, end_point) def modify_point(product_name, region_id, end_point): for point in __endpoints: if point == region_id: point_info = __endpoints.get(point) region_list = point_info.get('regions') products = point_info.get('products') if region_id is not None and region_id not in region_list: region_list.append(region_id) if end_point is not None: product_exit = 0 for prod in products: if product_name in prod: prod[product_name] = end_point product_exit = 1 if product_exit == 0: item = dict() item[product_name] = end_point products.append(item) __mdict = dict() __mdict['regions'] = region_list __mdict['products'] = products __endpoints[point] = __mdict return region_list = [] products = [] region_list.append(region_id) item = dict() item[product_name] = end_point products.append(item) __mdict = dict() __mdict['regions'] = region_list __mdict['products'] = products __endpoints[region_id] = __mdict def convert_dict_to_endpointsxml(mdict): regions = list() products = list() for point in mdict: point_info = mdict.get(point) regions = point_info.get('regions') products = point_info.get('products') content = '' prefix = '<?xml version="1.0" encoding="UTF-8"?>\n<Endpoints>\n<Endpoint name="cn-hangzhou">\n' endfix = '</Endpoint>\n</Endpoints>\n' content += prefix content += '<RegionIds>\n' for item in regions: content += '<RegionId>' + item + '</RegionId>\n' content += '</RegionIds>\n' + '<Products>\n' for item in products: content += '<Product>\n' content += '<ProductName>' + list(item.keys())[0] + '</ProductName>\n' content += '<DomainName>' + item[list(item.keys())[0]] + '</DomainName>\n' content += '</Product>\n' content += '</Products>' content += endfix # print content if not os.path.isfile(__endpoints_file): _createFile(__endpoints_file) f = open(__endpoints_file, 'w') try: f.write(''.join(content)) except Exception as e: print(e) print("Please confirm you has use sudo + cmd") finally: f.close() def 
_createFile(filename): namePath = os.path.split(filename)[0] if not os.path.isdir(namePath): os.makedirs(namePath) with os.fdopen(os.open(filename, os.O_WRONLY | os.O_CREAT, 0o600), 'w'): pass if __name__ == '__main__': print(find_product_domain('cn-hangzhou', 'Rds')) modify_point('ecs', 'cn-beijing-2', 'ecs.aliyuncs.com')
0.253214
0.064418
import socket class NETPowerConnector(): """Connects to NET Power Control via UDP. """ def __init__(self): self.user = 'admin' self.password = '<PASSWORD>' def send_to_power_control(self, message): """Send string message to Power Control via UDP. Input: message as string. Output: None """ # Specifiy NET Power Control's IP and Port UDP_IP = "192.168.3.11" UDP_TO_PORT = 75 # Context Manager to close Connection automatically with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as conn: conn.sendto(message.encode(), (UDP_IP, UDP_TO_PORT)) def listen_to_power_control(): """Receive message from Power Control. Input: None Output: Received data as string. """ UDP_FROM_PORT = 7700 # Context Manager to close Connection automatically with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as conn: conn.bind(('', UDP_FROM_PORT)) data, addr = conn.recvfrom(1024) # 1024 is buffersize data = data.decode() return data def turn_on_relay(self, number): """Turn on relay on NET Power Control. Input: relay number as int. """ number = str(number) message = 'Sw_on' + number + self.user + self.password self.send_to_power_control(message) def turn_off_relay(self, number): """Turn off relay on NET Power Control. Input: relay number as int. """ number = str(number) message = 'Sw_off' + number + self.user + self.password self.send_to_power_control(message) def ask_state(self): """Get current state of Power control. Input: None Output: String representing State. """ message = "wer da?" self.send_to_power_control(message) reply = self.listen_to_power_control() return reply
WaltzControl/PowerControl/power_connector.py
import socket class NETPowerConnector(): """Connects to NET Power Control via UDP. """ def __init__(self): self.user = 'admin' self.password = '<PASSWORD>' def send_to_power_control(self, message): """Send string message to Power Control via UDP. Input: message as string. Output: None """ # Specifiy NET Power Control's IP and Port UDP_IP = "192.168.3.11" UDP_TO_PORT = 75 # Context Manager to close Connection automatically with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as conn: conn.sendto(message.encode(), (UDP_IP, UDP_TO_PORT)) def listen_to_power_control(): """Receive message from Power Control. Input: None Output: Received data as string. """ UDP_FROM_PORT = 7700 # Context Manager to close Connection automatically with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as conn: conn.bind(('', UDP_FROM_PORT)) data, addr = conn.recvfrom(1024) # 1024 is buffersize data = data.decode() return data def turn_on_relay(self, number): """Turn on relay on NET Power Control. Input: relay number as int. """ number = str(number) message = 'Sw_on' + number + self.user + self.password self.send_to_power_control(message) def turn_off_relay(self, number): """Turn off relay on NET Power Control. Input: relay number as int. """ number = str(number) message = 'Sw_off' + number + self.user + self.password self.send_to_power_control(message) def ask_state(self): """Get current state of Power control. Input: None Output: String representing State. """ message = "wer da?" self.send_to_power_control(message) reply = self.listen_to_power_control() return reply
0.57821
0.178938
import threading import logging import os import time import voluptuous as vol import homeassistant.helpers.config_validation as cv from homeassistant.const import ( EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP) REQUIREMENTS = ['evdev==0.6.1'] _LOGGER = logging.getLogger(__name__) DEVICE_DESCRIPTOR = 'device_descriptor' DEVICE_ID_GROUP = 'Device description' DEVICE_NAME = 'device_name' DOMAIN = 'keyboard_remote' ICON = 'mdi:remote' KEY_CODE = 'key_code' KEY_VALUE = {'key_up': 0, 'key_down': 1, 'key_hold': 2} KEYBOARD_REMOTE_COMMAND_RECEIVED = 'keyboard_remote_command_received' KEYBOARD_REMOTE_CONNECTED = 'keyboard_remote_connected' KEYBOARD_REMOTE_DISCONNECTED = 'keyboard_remote_disconnected' TYPE = 'type' CONFIG_SCHEMA = vol.Schema({ DOMAIN: vol.All(cv.ensure_list, [vol.Schema({ vol.Exclusive(DEVICE_DESCRIPTOR, DEVICE_ID_GROUP): cv.string, vol.Exclusive(DEVICE_NAME, DEVICE_ID_GROUP): cv.string, vol.Optional(TYPE, default='key_up'): vol.All(cv.string, vol.Any('key_up', 'key_down', 'key_hold')) })]) }, extra=vol.ALLOW_EXTRA) def setup(hass, config): """Set up the keyboard_remote.""" config = config.get(DOMAIN) keyboard_remote = KeyboardRemote(hass, config) def _start_keyboard_remote(_event): keyboard_remote.run() def _stop_keyboard_remote(_event): keyboard_remote.stop() hass.bus.listen_once(EVENT_HOMEASSISTANT_START, _start_keyboard_remote) hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, _stop_keyboard_remote) return True class KeyboardRemoteThread(threading.Thread): """This interfaces with the inputdevice using evdev.""" def __init__(self, hass, device_name, device_descriptor, key_value): """Construct a thread listening for events on one device.""" self.hass = hass self.device_name = device_name self.device_descriptor = device_descriptor self.key_value = key_value if self.device_descriptor: self.device_id = self.device_descriptor else: self.device_id = self.device_name self.dev = self._get_keyboard_device() if self.dev is not None: _LOGGER.debug("Keyboard 
connected, %s", self.device_id) else: _LOGGER.debug( "Keyboard not connected, %s. " "Check /dev/input/event* permissions", self.device_id) id_folder = '/dev/input/by-id/' if os.path.isdir(id_folder): from evdev import InputDevice, list_devices device_names = [InputDevice(file_name).name for file_name in list_devices()] _LOGGER.debug( "Possible device names are: %s. " "Possible device descriptors are %s: %s", device_names, id_folder, os.listdir(id_folder)) threading.Thread.__init__(self) self.stopped = threading.Event() self.hass = hass def _get_keyboard_device(self): """Get the keyboard device.""" from evdev import InputDevice, list_devices if self.device_name: devices = [InputDevice(file_name) for file_name in list_devices()] for device in devices: if self.device_name == device.name: return device elif self.device_descriptor: try: device = InputDevice(self.device_descriptor) except OSError: pass else: return device return None def run(self): """Run the loop of the KeyboardRemote.""" from evdev import categorize, ecodes if self.dev is not None: self.dev.grab() _LOGGER.debug("Interface started for %s", self.dev) while not self.stopped.isSet(): # Sleeps to ease load on processor time.sleep(.05) if self.dev is None: self.dev = self._get_keyboard_device() if self.dev is not None: self.dev.grab() self.hass.bus.fire( KEYBOARD_REMOTE_CONNECTED, { DEVICE_DESCRIPTOR: self.device_descriptor, DEVICE_NAME: self.device_name } ) _LOGGER.debug("Keyboard re-connected, %s", self.device_id) else: continue try: event = self.dev.read_one() except IOError: # Keyboard Disconnected self.dev = None self.hass.bus.fire( KEYBOARD_REMOTE_DISCONNECTED, { DEVICE_DESCRIPTOR: self.device_descriptor, DEVICE_NAME: self.device_name } ) _LOGGER.debug("Keyboard disconnected, %s", self.device_id) continue if not event: continue if event.type is ecodes.EV_KEY and event.value is self.key_value: _LOGGER.debug(categorize(event)) self.hass.bus.fire( KEYBOARD_REMOTE_COMMAND_RECEIVED, { KEY_CODE: event.code, 
DEVICE_DESCRIPTOR: self.device_descriptor, DEVICE_NAME: self.device_name } ) class KeyboardRemote: """Sets up one thread per device.""" def __init__(self, hass, config): """Construct a KeyboardRemote interface object.""" self.threads = [] for dev_block in config: device_descriptor = dev_block.get(DEVICE_DESCRIPTOR) device_name = dev_block.get(DEVICE_NAME) key_value = KEY_VALUE.get(dev_block.get(TYPE, 'key_up')) if device_descriptor is not None\ or device_name is not None: thread = KeyboardRemoteThread( hass, device_name, device_descriptor, key_value) self.threads.append(thread) def run(self): """Run all event listener threads.""" for thread in self.threads: thread.start() def stop(self): """Stop all event listener threads.""" for thread in self.threads: thread.stopped.set()
homeassistant/components/keyboard_remote.py
import threading import logging import os import time import voluptuous as vol import homeassistant.helpers.config_validation as cv from homeassistant.const import ( EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP) REQUIREMENTS = ['evdev==0.6.1'] _LOGGER = logging.getLogger(__name__) DEVICE_DESCRIPTOR = 'device_descriptor' DEVICE_ID_GROUP = 'Device description' DEVICE_NAME = 'device_name' DOMAIN = 'keyboard_remote' ICON = 'mdi:remote' KEY_CODE = 'key_code' KEY_VALUE = {'key_up': 0, 'key_down': 1, 'key_hold': 2} KEYBOARD_REMOTE_COMMAND_RECEIVED = 'keyboard_remote_command_received' KEYBOARD_REMOTE_CONNECTED = 'keyboard_remote_connected' KEYBOARD_REMOTE_DISCONNECTED = 'keyboard_remote_disconnected' TYPE = 'type' CONFIG_SCHEMA = vol.Schema({ DOMAIN: vol.All(cv.ensure_list, [vol.Schema({ vol.Exclusive(DEVICE_DESCRIPTOR, DEVICE_ID_GROUP): cv.string, vol.Exclusive(DEVICE_NAME, DEVICE_ID_GROUP): cv.string, vol.Optional(TYPE, default='key_up'): vol.All(cv.string, vol.Any('key_up', 'key_down', 'key_hold')) })]) }, extra=vol.ALLOW_EXTRA) def setup(hass, config): """Set up the keyboard_remote.""" config = config.get(DOMAIN) keyboard_remote = KeyboardRemote(hass, config) def _start_keyboard_remote(_event): keyboard_remote.run() def _stop_keyboard_remote(_event): keyboard_remote.stop() hass.bus.listen_once(EVENT_HOMEASSISTANT_START, _start_keyboard_remote) hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, _stop_keyboard_remote) return True class KeyboardRemoteThread(threading.Thread): """This interfaces with the inputdevice using evdev.""" def __init__(self, hass, device_name, device_descriptor, key_value): """Construct a thread listening for events on one device.""" self.hass = hass self.device_name = device_name self.device_descriptor = device_descriptor self.key_value = key_value if self.device_descriptor: self.device_id = self.device_descriptor else: self.device_id = self.device_name self.dev = self._get_keyboard_device() if self.dev is not None: _LOGGER.debug("Keyboard 
connected, %s", self.device_id) else: _LOGGER.debug( "Keyboard not connected, %s. " "Check /dev/input/event* permissions", self.device_id) id_folder = '/dev/input/by-id/' if os.path.isdir(id_folder): from evdev import InputDevice, list_devices device_names = [InputDevice(file_name).name for file_name in list_devices()] _LOGGER.debug( "Possible device names are: %s. " "Possible device descriptors are %s: %s", device_names, id_folder, os.listdir(id_folder)) threading.Thread.__init__(self) self.stopped = threading.Event() self.hass = hass def _get_keyboard_device(self): """Get the keyboard device.""" from evdev import InputDevice, list_devices if self.device_name: devices = [InputDevice(file_name) for file_name in list_devices()] for device in devices: if self.device_name == device.name: return device elif self.device_descriptor: try: device = InputDevice(self.device_descriptor) except OSError: pass else: return device return None def run(self): """Run the loop of the KeyboardRemote.""" from evdev import categorize, ecodes if self.dev is not None: self.dev.grab() _LOGGER.debug("Interface started for %s", self.dev) while not self.stopped.isSet(): # Sleeps to ease load on processor time.sleep(.05) if self.dev is None: self.dev = self._get_keyboard_device() if self.dev is not None: self.dev.grab() self.hass.bus.fire( KEYBOARD_REMOTE_CONNECTED, { DEVICE_DESCRIPTOR: self.device_descriptor, DEVICE_NAME: self.device_name } ) _LOGGER.debug("Keyboard re-connected, %s", self.device_id) else: continue try: event = self.dev.read_one() except IOError: # Keyboard Disconnected self.dev = None self.hass.bus.fire( KEYBOARD_REMOTE_DISCONNECTED, { DEVICE_DESCRIPTOR: self.device_descriptor, DEVICE_NAME: self.device_name } ) _LOGGER.debug("Keyboard disconnected, %s", self.device_id) continue if not event: continue if event.type is ecodes.EV_KEY and event.value is self.key_value: _LOGGER.debug(categorize(event)) self.hass.bus.fire( KEYBOARD_REMOTE_COMMAND_RECEIVED, { KEY_CODE: event.code, 
DEVICE_DESCRIPTOR: self.device_descriptor, DEVICE_NAME: self.device_name } ) class KeyboardRemote: """Sets up one thread per device.""" def __init__(self, hass, config): """Construct a KeyboardRemote interface object.""" self.threads = [] for dev_block in config: device_descriptor = dev_block.get(DEVICE_DESCRIPTOR) device_name = dev_block.get(DEVICE_NAME) key_value = KEY_VALUE.get(dev_block.get(TYPE, 'key_up')) if device_descriptor is not None\ or device_name is not None: thread = KeyboardRemoteThread( hass, device_name, device_descriptor, key_value) self.threads.append(thread) def run(self): """Run all event listener threads.""" for thread in self.threads: thread.start() def stop(self): """Stop all event listener threads.""" for thread in self.threads: thread.stopped.set()
0.466603
0.052765
import os import argparse import torch import random import numpy as np from torch.utils.data import DataLoader from datasets.dataset_dfc import DFC2020 from networks.propnets import E_Fusion from utils.util import RandomApply, default, seed_torch from utils.losses import HardNegtive_loss from kornia import filters from torch.optim.lr_scheduler import MultiStepLR, CosineAnnealingWarmRestarts from utils.augmentation.augmentation import RandomHorizontalFlip, RandomVerticalFlip, RandomRotation, \ RandomAffine, RandomPerspective from utils.augmentation.aug_params import RandomHorizontalFlip_params, RandomVerticalFlip_params, \ RandomRotation_params, RandomAffine_params, RandomPerspective_params def get_scheduler(optimizer, args): if args.lr_step == "cos": return CosineAnnealingWarmRestarts( optimizer, T_0=args.epochs if args.T0 is None else args.T0, T_mult=args.Tmult, eta_min=args.eta_min, ) elif args.lr_step == "step": m = [args.epochs - a for a in args.drop] return MultiStepLR(optimizer, milestones=m, gamma=args.drop_gamma) else: return None def parse_option(): parser = argparse.ArgumentParser('argument for training') # 1600 parser.add_argument('--batch_size', type=int, default=1000, help='batch_size') parser.add_argument('--crop_size', type=int, default=32, help='crop_size') parser.add_argument('--num_workers', type=int, default=0, help='num of workers to use') parser.add_argument('--epochs', type=int, default=700, help='number of training epochs') # resume path parser.add_argument('--resume', action='store_true', default=True, help='path to latest checkpoint (default: none)') parser.add_argument('--in_dim', type=int, default=256, help='dim of feat for inner product') parser.add_argument('--feat_dim', type=int, default=256, help='dim of feat for inner product') # learning rate parser.add_argument("--T0", type=int, help="period (for --lr_step cos)") parser.add_argument("--Tmult", type=int, default=1, help="period factor (for --lr_step cos)") 
parser.add_argument("--lr_step", type=str, choices=["cos", "step", "none"], default="step", help="learning rate schedule type") parser.add_argument("--lr", type=float, default=3e-3, help="learning rate") parser.add_argument("--eta_min", type=float, default=0, help="min learning rate (for --lr_step cos)") parser.add_argument("--adam_l2", type=float, default=1e-6, help="weight decay (L2 penalty)") parser.add_argument("--drop", type=int, nargs="*", default=[50, 25], help="milestones for learning rate decay (0 = last epoch)") parser.add_argument("--drop_gamma", type=float, default=0.2, help="multiplicative factor of learning rate decay") parser.add_argument("--no_lr_warmup", dest="lr_warmup", action="store_false", help="do not use learning rate warmup") # input/output parser.add_argument('--use_s2hr', action='store_true', default=True, help='use sentinel-2 high-resolution (10 m) bands') parser.add_argument('--use_s2mr', action='store_true', default=False, help='use sentinel-2 medium-resolution (20 m) bands') parser.add_argument('--use_s2lr', action='store_true', default=False, help='use sentinel-2 low-resolution (60 m) bands') parser.add_argument('--use_s1', action='store_true', default=True, help='use sentinel-1 data') #True for OSCD False for DFC2020 parser.add_argument('--no_savanna', action='store_true', default=False, help='ignore class savanna') # add new views #'/workplace/OSCD' #'/R740-75T/Chenyx/Workplace/OSCD' parser.add_argument('--data_dir_train', type=str, default='/workplace/DFC2020', help='path to training dataset') parser.add_argument('--model_path', type=str, default='./save', help='path to save model') parser.add_argument('--save', type=str, default='./EfusionS1', help='path to save linear classifier') opt = parser.parse_args() # set up saving name opt.save_path = os.path.join(opt.model_path, opt.save) if not os.path.isdir(opt.save_path): os.makedirs(opt.save_path) if not os.path.isdir(opt.data_dir_train): raise ValueError('data path not exist: 
{}'.format(opt.data_dir_train)) return opt def get_train_loader(args): # load datasets train_set = DFC2020(args.data_dir_train, subset="train", no_savanna=args.no_savanna, use_s2hr=args.use_s2hr, use_s2mr=args.use_s2mr, use_s2lr=args.use_s2lr, use_s1=args.use_s1, transform=True, unlabeled=True, crop_size=args.crop_size) #train_index='./utils/train_40.npy') n_classes = train_set.n_classes n_inputs = train_set.n_inputs args.no_savanna = train_set.no_savanna args.display_channels = train_set.display_channels args.brightness_factor = train_set.brightness_factor train_size = int(0.16 * len(train_set)) test_size = len(train_set) - train_size train_dataset, test_dataset = torch.utils.data.random_split(train_set, [train_size, test_size]) # set up dataloaders train_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers, pin_memory=True, drop_last=False) return train_loader, n_inputs, n_classes class Trainer: def __init__(self, args, online_network, optimizer, criterion, scheduler, device): self.args = args DEFAULT_AUG = RandomApply(filters.GaussianBlur2d((11, 11), (1.5, 2.5)), p=0.5) augment_fn = None self.augment = default(augment_fn, DEFAULT_AUG) self.augment_type = ['Horizontalflip', 'VerticalFlip'] self.rot_agl = 15 self.dis_scl = 0.2 self.scl_sz = [0.8, 1.2] self.shear = [-0.2, 0.2] # self.mov_rg = random.uniform(-0.2, 0.2) self.aug_RHF = RandomHorizontalFlip(p=1) self.aug_RVF = RandomVerticalFlip(p=1) self.aug_ROT = RandomRotation(p=1, theta=self.rot_agl, interpolation='nearest') self.aug_PST = RandomPerspective(p=1, distortion_scale=0.3) self.aug_AFF = RandomAffine(p=1, theta=0, h_trans=random.uniform(-0.2, 0.2), v_trans=random.uniform(-0.2, 0.2), scale=None, shear=None, interpolation='nearest') self.online_network = online_network self.optimizer = optimizer self.scheduler = scheduler self.device = device self.savepath = args.save_path self.criterion = criterion self.max_epochs = args.epochs self.batch_size = 
args.batch_size self.num_workers = args.num_workers self.feat_dim = args.feat_dim self.lr_warmup = args.lr_warmup_val self.lr = args.lr self.lr_step = args.lr_step def aug_list(self, img, model, params): for i in range(len(model)): img = model[i](img, params[i]) return img def train(self, train_loader): niter = 0 for epoch_counter in range(self.max_epochs): train_loss = 0.0 iters = len(train_loader) for idx, batch in enumerate(train_loader): if self.lr_warmup < 50: lr_scale = (self.lr_warmup + 1) / 50 for pg in self.optimizer.param_groups: pg["lr"] = self.lr * lr_scale self.lr_warmup += 1 image = batch['image'] segmt = batch['segments'] loss = self.update(image, segmt) self.optimizer.zero_grad() loss.backward() self.optimizer.step() niter += 1 train_loss += loss.item() if self.lr_step == "cos" and self.lr_warmup >= 50: self.scheduler.step(epoch_counter + idx / iters) if self.lr_step == "step": self.scheduler.step() train_loss = train_loss / len(train_loader) print('Epoch: {} \tTraining Loss: {:.6f}'.format(epoch_counter, train_loss)) # save checkpoints if (epoch_counter + 1) % 100 == 0: self.save_model(os.path.join(self.savepath, 'twins_epoch_{epoch}_{loss}.pth'.format(epoch=epoch_counter, loss=train_loss))) torch.cuda.empty_cache() def update(self, image, segmt): args = self.args sample_num = 1 aug_type = random.sample(self.augment_type, sample_num) # augmentations model = [] param = [] if 'Horizontalflip' in aug_type: model.append(self.aug_RHF) param.append(RandomHorizontalFlip_params(0.5, image.shape[0], image.shape[-2:], self.device, image.dtype)) if 'VerticalFlip' in aug_type: model.append(self.aug_RVF) param.append(RandomVerticalFlip_params(0.5, image.shape[0], image.shape[-2:], self.device, image.dtype)) model.append(self.aug_AFF) param.append(RandomAffine_params(1.0, 0.0, random.uniform(-0.2, 0.2), random.uniform(-0.2, 0.2), None, None, image.shape[0], image.shape[-2:], self.device, image.dtype)) # split input _, image = torch.split(image, [4, 2], dim=1) 
batch_view_1 = image.to(self.device) batch_view_2 = image.to(self.device) # tranforme one input view batch_view_1 = self.aug_list(batch_view_1, model, param) # 32 batch_view_1 = batch_view_1[:, :, 8: 24, 8: 24] batch_view_2 = batch_view_2[:, :, 8: 24, 8: 24] batch_segm_2 = segmt[:, 8: 24, 8: 24] # compute query feature l_feature1, l_feature2, loss_vq = self.online_network(batch_view_1, batch_view_2, mode=0) l_feature2 = self.aug_list(l_feature2, model, param) # mask no-overlap with torch.no_grad(): batch_segm_2 = batch_segm_2.unsqueeze(dim=1) batch_segm_2 = self.aug_list(batch_segm_2.float(), model, param)[:, 0, :, :] ones = self.mask_spix(batch_segm_2) one_mask = ones.long().eq(1).to(self.device) batch_segm_2 = batch_segm_2.long().to(self.device) l_feature1, l_feature2= self.get_spix_data(batch_segm_2, one_mask, l_feature1, l_feature2) # pixel loss loss = self.criterion(l_feature1, l_feature2) + loss_vq return loss def save_model(self, PATH): print('==> Saving...') state = { 'online_network_state_dict': self.online_network.state_dict(), 'optimizer_state_dict': self.optimizer.state_dict(), } torch.save(state, PATH) # help release GPU memory del state def get_spix_data(self, batch_segm_2, one_mask, inFeats1, inFeats2): bs, C, H, W = inFeats1.shape batch_segm_2 = batch_segm_2 * one_mask one_mask = one_mask.contiguous().view(-1) new_seg = batch_segm_2.view(-1).contiguous() values_idx = new_seg[one_mask] outFeats1 = torch.zeros((len(values_idx), C)).to(self.device) outFeats2 = torch.zeros((len(values_idx), C)).to(self.device) unique = torch.unique(values_idx, sorted=False, return_inverse=False, dim=0) for i in unique: s0 = batch_segm_2 == i s0_idx = values_idx == i spix_idx = s0.sum(axis=1).sum(axis=1) == 0 ex_dim_s0 = s0[:, None, :, :] mask_nums = s0.sum(axis=1).sum(axis=1) mask_nums[mask_nums == 0] = 1 mask_nums = mask_nums[:, None] masked1 = ex_dim_s0 * inFeats1 masked2 = ex_dim_s0 * inFeats2 ## first sum_sup_feats1 = masked1.sum(axis=2).sum(axis=2) avg_sup_feats1 = 
sum_sup_feats1 / mask_nums outFeats1[s0_idx, :] = avg_sup_feats1[~spix_idx, :] ## second sum_sup_feats2 = masked2.sum(axis=2).sum(axis=2) avg_sup_feats2 = sum_sup_feats2 / mask_nums outFeats2[s0_idx, :] = avg_sup_feats2[~spix_idx, :] return outFeats1, outFeats2 def mask_spix(self, image): b, w, h = image.shape zero = torch.zeros((b, w, h)) samples = np.random.randint(w, size=(200, 2)) for i in range(b): img_i = image[i][samples[:, 0], samples[:, 1]] val_i, index = self.unique(img_i) if len(val_i) > 0 and val_i[0] == 0: val_i = val_i[1::] index = index[1::] # print(val_i) if len(index) == 1: unique_i = samples[index] zero[i][unique_i[0], unique_i[1]] = 1 elif len(index) > 1: unique_i = samples[index] zero[i][unique_i[:, 0], unique_i[:, 1]] = 1 return zero def unique(self, x, dim=0): unique, inverse = torch.unique( x, sorted=True, return_inverse=True, dim=dim) perm = torch.arange(inverse.size(0), dtype=inverse.dtype, device=inverse.device) inverse, perm = inverse.flip([0]), perm.flip([0]) return unique, inverse.new_empty(unique.size(0)).scatter_(0, inverse, perm) def main(): # parse the args args = parse_option() # set flags for GPU processing if available #device = 'cuda' if torch.cuda.is_available() else 'cpu' device = 'cuda' # set the data loader train_loader, n_inputs, n_classes = get_train_loader(args) args.n_inputs = n_inputs args.n_classes = n_classes # set the model online_network = E_Fusion(width=1, in_channel=2, in_dim=args.in_dim, feat_dim=args.feat_dim).to(device) ## load pre-trained model if defined if args.resume: try: print('loading pretrained models') checkpoints_folder = os.path.join('.', 'save/EfusionS1') # load pre-trained parameters load_params = torch.load( os.path.join(os.path.join(checkpoints_folder, 'twins_epoch_599_10.689021110534668.pth')), map_location=device) online_network.load_state_dict(load_params['online_network_state_dict'], strict=False) except FileNotFoundError: print("Pre-trained weights not found. 
Training from scratch.") # target encoder criterion = HardNegtive_loss() optimizer = torch.optim.Adam(online_network.parameters(), lr=3e-4, weight_decay=1e-4) scheduler = get_scheduler(optimizer, args) args.lr_warmup_val = 0 if args.lr_warmup else 50 trainer = Trainer(args, online_network=online_network, optimizer=optimizer, criterion=criterion, scheduler=scheduler, device=device) trainer.train(train_loader) if __name__ == '__main__': os.environ["CUDA_VISIBLE_DEVICES"] = "0" seed_torch(seed=1024) main()
train_EfusionS1.py
import os import argparse import torch import random import numpy as np from torch.utils.data import DataLoader from datasets.dataset_dfc import DFC2020 from networks.propnets import E_Fusion from utils.util import RandomApply, default, seed_torch from utils.losses import HardNegtive_loss from kornia import filters from torch.optim.lr_scheduler import MultiStepLR, CosineAnnealingWarmRestarts from utils.augmentation.augmentation import RandomHorizontalFlip, RandomVerticalFlip, RandomRotation, \ RandomAffine, RandomPerspective from utils.augmentation.aug_params import RandomHorizontalFlip_params, RandomVerticalFlip_params, \ RandomRotation_params, RandomAffine_params, RandomPerspective_params def get_scheduler(optimizer, args): if args.lr_step == "cos": return CosineAnnealingWarmRestarts( optimizer, T_0=args.epochs if args.T0 is None else args.T0, T_mult=args.Tmult, eta_min=args.eta_min, ) elif args.lr_step == "step": m = [args.epochs - a for a in args.drop] return MultiStepLR(optimizer, milestones=m, gamma=args.drop_gamma) else: return None def parse_option(): parser = argparse.ArgumentParser('argument for training') # 1600 parser.add_argument('--batch_size', type=int, default=1000, help='batch_size') parser.add_argument('--crop_size', type=int, default=32, help='crop_size') parser.add_argument('--num_workers', type=int, default=0, help='num of workers to use') parser.add_argument('--epochs', type=int, default=700, help='number of training epochs') # resume path parser.add_argument('--resume', action='store_true', default=True, help='path to latest checkpoint (default: none)') parser.add_argument('--in_dim', type=int, default=256, help='dim of feat for inner product') parser.add_argument('--feat_dim', type=int, default=256, help='dim of feat for inner product') # learning rate parser.add_argument("--T0", type=int, help="period (for --lr_step cos)") parser.add_argument("--Tmult", type=int, default=1, help="period factor (for --lr_step cos)") 
parser.add_argument("--lr_step", type=str, choices=["cos", "step", "none"], default="step", help="learning rate schedule type") parser.add_argument("--lr", type=float, default=3e-3, help="learning rate") parser.add_argument("--eta_min", type=float, default=0, help="min learning rate (for --lr_step cos)") parser.add_argument("--adam_l2", type=float, default=1e-6, help="weight decay (L2 penalty)") parser.add_argument("--drop", type=int, nargs="*", default=[50, 25], help="milestones for learning rate decay (0 = last epoch)") parser.add_argument("--drop_gamma", type=float, default=0.2, help="multiplicative factor of learning rate decay") parser.add_argument("--no_lr_warmup", dest="lr_warmup", action="store_false", help="do not use learning rate warmup") # input/output parser.add_argument('--use_s2hr', action='store_true', default=True, help='use sentinel-2 high-resolution (10 m) bands') parser.add_argument('--use_s2mr', action='store_true', default=False, help='use sentinel-2 medium-resolution (20 m) bands') parser.add_argument('--use_s2lr', action='store_true', default=False, help='use sentinel-2 low-resolution (60 m) bands') parser.add_argument('--use_s1', action='store_true', default=True, help='use sentinel-1 data') #True for OSCD False for DFC2020 parser.add_argument('--no_savanna', action='store_true', default=False, help='ignore class savanna') # add new views #'/workplace/OSCD' #'/R740-75T/Chenyx/Workplace/OSCD' parser.add_argument('--data_dir_train', type=str, default='/workplace/DFC2020', help='path to training dataset') parser.add_argument('--model_path', type=str, default='./save', help='path to save model') parser.add_argument('--save', type=str, default='./EfusionS1', help='path to save linear classifier') opt = parser.parse_args() # set up saving name opt.save_path = os.path.join(opt.model_path, opt.save) if not os.path.isdir(opt.save_path): os.makedirs(opt.save_path) if not os.path.isdir(opt.data_dir_train): raise ValueError('data path not exist: 
{}'.format(opt.data_dir_train)) return opt def get_train_loader(args): # load datasets train_set = DFC2020(args.data_dir_train, subset="train", no_savanna=args.no_savanna, use_s2hr=args.use_s2hr, use_s2mr=args.use_s2mr, use_s2lr=args.use_s2lr, use_s1=args.use_s1, transform=True, unlabeled=True, crop_size=args.crop_size) #train_index='./utils/train_40.npy') n_classes = train_set.n_classes n_inputs = train_set.n_inputs args.no_savanna = train_set.no_savanna args.display_channels = train_set.display_channels args.brightness_factor = train_set.brightness_factor train_size = int(0.16 * len(train_set)) test_size = len(train_set) - train_size train_dataset, test_dataset = torch.utils.data.random_split(train_set, [train_size, test_size]) # set up dataloaders train_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers, pin_memory=True, drop_last=False) return train_loader, n_inputs, n_classes class Trainer: def __init__(self, args, online_network, optimizer, criterion, scheduler, device): self.args = args DEFAULT_AUG = RandomApply(filters.GaussianBlur2d((11, 11), (1.5, 2.5)), p=0.5) augment_fn = None self.augment = default(augment_fn, DEFAULT_AUG) self.augment_type = ['Horizontalflip', 'VerticalFlip'] self.rot_agl = 15 self.dis_scl = 0.2 self.scl_sz = [0.8, 1.2] self.shear = [-0.2, 0.2] # self.mov_rg = random.uniform(-0.2, 0.2) self.aug_RHF = RandomHorizontalFlip(p=1) self.aug_RVF = RandomVerticalFlip(p=1) self.aug_ROT = RandomRotation(p=1, theta=self.rot_agl, interpolation='nearest') self.aug_PST = RandomPerspective(p=1, distortion_scale=0.3) self.aug_AFF = RandomAffine(p=1, theta=0, h_trans=random.uniform(-0.2, 0.2), v_trans=random.uniform(-0.2, 0.2), scale=None, shear=None, interpolation='nearest') self.online_network = online_network self.optimizer = optimizer self.scheduler = scheduler self.device = device self.savepath = args.save_path self.criterion = criterion self.max_epochs = args.epochs self.batch_size = 
args.batch_size self.num_workers = args.num_workers self.feat_dim = args.feat_dim self.lr_warmup = args.lr_warmup_val self.lr = args.lr self.lr_step = args.lr_step def aug_list(self, img, model, params): for i in range(len(model)): img = model[i](img, params[i]) return img def train(self, train_loader): niter = 0 for epoch_counter in range(self.max_epochs): train_loss = 0.0 iters = len(train_loader) for idx, batch in enumerate(train_loader): if self.lr_warmup < 50: lr_scale = (self.lr_warmup + 1) / 50 for pg in self.optimizer.param_groups: pg["lr"] = self.lr * lr_scale self.lr_warmup += 1 image = batch['image'] segmt = batch['segments'] loss = self.update(image, segmt) self.optimizer.zero_grad() loss.backward() self.optimizer.step() niter += 1 train_loss += loss.item() if self.lr_step == "cos" and self.lr_warmup >= 50: self.scheduler.step(epoch_counter + idx / iters) if self.lr_step == "step": self.scheduler.step() train_loss = train_loss / len(train_loader) print('Epoch: {} \tTraining Loss: {:.6f}'.format(epoch_counter, train_loss)) # save checkpoints if (epoch_counter + 1) % 100 == 0: self.save_model(os.path.join(self.savepath, 'twins_epoch_{epoch}_{loss}.pth'.format(epoch=epoch_counter, loss=train_loss))) torch.cuda.empty_cache() def update(self, image, segmt): args = self.args sample_num = 1 aug_type = random.sample(self.augment_type, sample_num) # augmentations model = [] param = [] if 'Horizontalflip' in aug_type: model.append(self.aug_RHF) param.append(RandomHorizontalFlip_params(0.5, image.shape[0], image.shape[-2:], self.device, image.dtype)) if 'VerticalFlip' in aug_type: model.append(self.aug_RVF) param.append(RandomVerticalFlip_params(0.5, image.shape[0], image.shape[-2:], self.device, image.dtype)) model.append(self.aug_AFF) param.append(RandomAffine_params(1.0, 0.0, random.uniform(-0.2, 0.2), random.uniform(-0.2, 0.2), None, None, image.shape[0], image.shape[-2:], self.device, image.dtype)) # split input _, image = torch.split(image, [4, 2], dim=1) 
batch_view_1 = image.to(self.device) batch_view_2 = image.to(self.device) # tranforme one input view batch_view_1 = self.aug_list(batch_view_1, model, param) # 32 batch_view_1 = batch_view_1[:, :, 8: 24, 8: 24] batch_view_2 = batch_view_2[:, :, 8: 24, 8: 24] batch_segm_2 = segmt[:, 8: 24, 8: 24] # compute query feature l_feature1, l_feature2, loss_vq = self.online_network(batch_view_1, batch_view_2, mode=0) l_feature2 = self.aug_list(l_feature2, model, param) # mask no-overlap with torch.no_grad(): batch_segm_2 = batch_segm_2.unsqueeze(dim=1) batch_segm_2 = self.aug_list(batch_segm_2.float(), model, param)[:, 0, :, :] ones = self.mask_spix(batch_segm_2) one_mask = ones.long().eq(1).to(self.device) batch_segm_2 = batch_segm_2.long().to(self.device) l_feature1, l_feature2= self.get_spix_data(batch_segm_2, one_mask, l_feature1, l_feature2) # pixel loss loss = self.criterion(l_feature1, l_feature2) + loss_vq return loss def save_model(self, PATH): print('==> Saving...') state = { 'online_network_state_dict': self.online_network.state_dict(), 'optimizer_state_dict': self.optimizer.state_dict(), } torch.save(state, PATH) # help release GPU memory del state def get_spix_data(self, batch_segm_2, one_mask, inFeats1, inFeats2): bs, C, H, W = inFeats1.shape batch_segm_2 = batch_segm_2 * one_mask one_mask = one_mask.contiguous().view(-1) new_seg = batch_segm_2.view(-1).contiguous() values_idx = new_seg[one_mask] outFeats1 = torch.zeros((len(values_idx), C)).to(self.device) outFeats2 = torch.zeros((len(values_idx), C)).to(self.device) unique = torch.unique(values_idx, sorted=False, return_inverse=False, dim=0) for i in unique: s0 = batch_segm_2 == i s0_idx = values_idx == i spix_idx = s0.sum(axis=1).sum(axis=1) == 0 ex_dim_s0 = s0[:, None, :, :] mask_nums = s0.sum(axis=1).sum(axis=1) mask_nums[mask_nums == 0] = 1 mask_nums = mask_nums[:, None] masked1 = ex_dim_s0 * inFeats1 masked2 = ex_dim_s0 * inFeats2 ## first sum_sup_feats1 = masked1.sum(axis=2).sum(axis=2) avg_sup_feats1 = 
sum_sup_feats1 / mask_nums outFeats1[s0_idx, :] = avg_sup_feats1[~spix_idx, :] ## second sum_sup_feats2 = masked2.sum(axis=2).sum(axis=2) avg_sup_feats2 = sum_sup_feats2 / mask_nums outFeats2[s0_idx, :] = avg_sup_feats2[~spix_idx, :] return outFeats1, outFeats2 def mask_spix(self, image): b, w, h = image.shape zero = torch.zeros((b, w, h)) samples = np.random.randint(w, size=(200, 2)) for i in range(b): img_i = image[i][samples[:, 0], samples[:, 1]] val_i, index = self.unique(img_i) if len(val_i) > 0 and val_i[0] == 0: val_i = val_i[1::] index = index[1::] # print(val_i) if len(index) == 1: unique_i = samples[index] zero[i][unique_i[0], unique_i[1]] = 1 elif len(index) > 1: unique_i = samples[index] zero[i][unique_i[:, 0], unique_i[:, 1]] = 1 return zero def unique(self, x, dim=0): unique, inverse = torch.unique( x, sorted=True, return_inverse=True, dim=dim) perm = torch.arange(inverse.size(0), dtype=inverse.dtype, device=inverse.device) inverse, perm = inverse.flip([0]), perm.flip([0]) return unique, inverse.new_empty(unique.size(0)).scatter_(0, inverse, perm) def main(): # parse the args args = parse_option() # set flags for GPU processing if available #device = 'cuda' if torch.cuda.is_available() else 'cpu' device = 'cuda' # set the data loader train_loader, n_inputs, n_classes = get_train_loader(args) args.n_inputs = n_inputs args.n_classes = n_classes # set the model online_network = E_Fusion(width=1, in_channel=2, in_dim=args.in_dim, feat_dim=args.feat_dim).to(device) ## load pre-trained model if defined if args.resume: try: print('loading pretrained models') checkpoints_folder = os.path.join('.', 'save/EfusionS1') # load pre-trained parameters load_params = torch.load( os.path.join(os.path.join(checkpoints_folder, 'twins_epoch_599_10.689021110534668.pth')), map_location=device) online_network.load_state_dict(load_params['online_network_state_dict'], strict=False) except FileNotFoundError: print("Pre-trained weights not found. 
Training from scratch.") # target encoder criterion = HardNegtive_loss() optimizer = torch.optim.Adam(online_network.parameters(), lr=3e-4, weight_decay=1e-4) scheduler = get_scheduler(optimizer, args) args.lr_warmup_val = 0 if args.lr_warmup else 50 trainer = Trainer(args, online_network=online_network, optimizer=optimizer, criterion=criterion, scheduler=scheduler, device=device) trainer.train(train_loader) if __name__ == '__main__': os.environ["CUDA_VISIBLE_DEVICES"] = "0" seed_torch(seed=1024) main()
0.644673
0.108614
# 当前脚本只能在 Windows 下运行 # 可以使用 Python 3 的语法,Windows 下大家约定用 Python 3.7 以上 import os import shutil import subprocess import sys import hashlib import socket import time import build_linux_yaml_template as template NOW_TEXT = time.strftime("%Y-%m-%d_%H_%M_%S%z", time.localtime()) # 要保证有 2 位数来表示子游戏类型,只能 2 位,不能多不能少。 DIR_LIST = ["hall","login","backstage","shop","statistics"] DIR_NAME = "linux_server_" + NOW_TEXT# 临时打包目录,打包完后删除 EXECUTE_NAME = "chat_server.exe" # 打包出来的可执行文件名 NEED_HASH_DIR_LIST = [ # 后续优化: 不手工维护这个表了,自动递归全部目录就好了,发现 *.go 就 hash "execute", "hall", "login", "shop", "backstage", "mongo_init", "easygo", "for_game", "pb", "deleter", ] FINGERPRINT_FILE_NAME = DIR_NAME + "/fingerprint_{now}.txt".format(now=NOW_TEXT) # CONFIG_DIR_NAME = "config_package" DEFAULT_HOST = '192.168.50.27' DEFAULT_DB='192.168.50.27' # 检查有没有改了没有提交的或是新的 *.go 文件 def has_change_or_new_file(): code, s = subprocess.getstatusoutput("git status") if code != 0: raise Exception(s) return ".go" in s def build(dir_name): os.chdir("execute") print("准备编译可执行文件 {} ...".format(EXECUTE_NAME)) #text = "set CGO_ENABLED=0&&set GOOS=linux&&set GOARCH=amd64&&go build -o ../{dir_name}/{exe_name}".format(dir_name=dir_name, exe_name=EXECUTE_NAME) text = "go build -o ../{dir_name}/{exe_name}".format(dir_name=dir_name, exe_name=EXECUTE_NAME) code,s = subprocess.getstatusoutput(text) #必须在同一个线程运行 否则不成功 if code != 0: raise Exception(s) print("编译成功") os.chdir(".." 
) def deal_yaml_json_py_etc(dir_name, dir_list, is_full, group, protocol, host,db): # 打包linux版本文件夹 if not is_full: return os.chdir("execute") os.chdir("../%s"%dir_name) # 复制文件到当前目录 shutil.copy("../start.py", "./") shutil.copy("../stop_by_pid_file.py", "./") shutil.copy("../stop_by_grep.py", "./") shutil.copy("../backup.py", "./") shutil.copy("../deploy.py", "./") content = template.TEMPLATE_SHARE.format(group=group,center="127.0.0.1",db="127.0.0.1") with open("./config_share.yaml", "w", encoding="utf-8") as f: f.write(content) content = template.TEMPLATE_HALL_SECRET # 直接写,无需 format .format(group=group, host=host) with open("./config_hall_secret.yaml", "w", encoding="utf-8") as f: f.write(content) # os.mkdir(CONFIG_DIR_NAME) # os.chdir(CONFIG_DIR_NAME) # os.system('xcopy "../../cheat" "cheat" /s /e /i /y') # os.system('xcopy "../../config" "config" /s /e /i /y') # os.chdir("../") for dir in dir_list: #把配置文件复制到各个文件夹下 os.mkdir(dir) print("创建 %s\t子目录,并生成了 yaml 配置文件进去 "%(dir,)) os.chdir(dir) if dir == "hall": content = template.TEMPLATE_HALL.format(group=group, host=host) with open("./config_hall.yaml", "w", encoding="utf-8") as f: f.write(content) elif dir == "login": content = template.TEMPLATE_LOGIN.format(group=group, host=host) with open("./config_login.yaml", "w", encoding="utf-8") as f: f.write(content) elif dir == "shop": content = template.TEMPLATE_SHOP.format(group=group, host=host) with open("./config_shop.yaml", "w", encoding="utf-8") as f: f.write(content) elif dir == "backstage": content = template.TEMPLATE_BACKSTAGE.format(group=group, host=host) with open("./config_backstage.yaml", "w", encoding="utf-8") as f: f.write(content) shutil.copy("../../backstage/version.json", "./") shutil.copy("../../backstage/tfserver.json", "./") elif dir == "statistics": content = template.TEMPLATE_STATISTICS.format(group=group, host=host) with open("./config_statistics.yaml", "w", encoding="utf-8") as f: f.write(content) else: raise Exception("未知的目录 "+ dir) 
os.mkdir("logs") os.chdir("../") os.chdir("../") def package_zip(dir_name, is_full): # 把打包文件夹压缩成zip文件 print("开始压缩 %s 目录,耗时较长,耐心等候 ...... " %(dir_name,)) if is_full: t = "full" else: t = "execute" name = "%s_%s.zip" %(dir_name, t) text = "7z.exe -tZip a %s ./%s -mx9"%(name, dir_name) code, s = subprocess.getstatusoutput(text) if code != 0: text = "安装7z压缩软件了吗???设置7z的环境变量了吗???" raise Exception(text + s) print("压缩 OK,包名是 "+name) def remove_dir(dir_name): # 删除打包文件夹 if os.path.exists(dir_name): print("删除临时打包目录 "+ dir_name) shutil.rmtree(dir_name) def hash_file(file_name): # hash 出 md5 值 if not os.path.isfile(file_name): return myhash = hashlib.md5() with open(file_name,'rb') as f: while True: b = f.read(8096) if not b: break myhash.update(b) return myhash.hexdigest() def hash_all_file(dir_name): # 获取到所有当前路径下的文件 lst = [] for (root, dirs, files) in os.walk(dir_name): _ = dirs for file_name in files: s1 = hash_file(root+"\\"+file_name) s2 = "%s\\%s: %s\n" % (root,file_name, s1) lst.append(s2) return "".join(lst) def gen_fingerprint_file(fingerprint_file, need_hash_dir_list, branch_name): # 哈希 *.go 代码文件 if os.path.exists(fingerprint_file): # 检测如果有这个文件就删除新建 os.remove(fingerprint_file) with open(fingerprint_file,"a",encoding="utf8") as f: host_name = socket.gethostname() # 获取本机计算机名 f.write("计算机名: %s\n"%host_name) f.write("打包时间: %s\n" % NOW_TEXT) f.write("打包工作目录: %s\n" % os.getcwd()) f.write("打包分支名: {}\n".format(branch_name)) # 获取当前提交版本号 code, s = subprocess.getstatusoutput("git rev-parse HEAD") f.write("最后 Commit: %s\n" % s) if code != 0: raise Exception(s) # 获取当前环境 Golang 版本 code,s = subprocess.getstatusoutput("go version") if code != 0: raise Exception(s) f.write("打包机器 Golang 版本: %s" % s) f.write("\n") digest = hash_file("./{dir_name}/{exe_name}".format(dir_name=DIR_NAME, exe_name=EXECUTE_NAME)) f.write("可执行文件 {} MD5 值: {}\n".format(EXECUTE_NAME, digest)) f.write("\n各源代码文件 MD5 值:\n") for dir_name in need_hash_dir_list: # 循环遍历所有需要 hash 的目录 text = hash_all_file(dir_name) 
f.write(text) print("生成各 *.go 源码文件的 hash 值成功") def main(): code, branch_name = subprocess.getstatusoutput("git symbolic-ref --short -q HEAD") if code != 0: raise Exception(branch_name) if branch_name != "master": while True: q = input("严重警告!!!!!! 当前分支是 {},你真的要对这个分支而不是 master 进行打包 (输入 y 或 n): ".format(branch_name)) if q == "": continue elif q == 'y': break else: print("中止打包") return if has_change_or_new_file(): while True: q = input("严重警告!!!!!! 发现有新的或是改动未提交的 go 文件,是否仍要继续打包? (输入 y 或 n): ") if q == "": continue elif q == 'y': break else: print("中止打包") return while True: s = input("打完整包还是只打可执行文件?(输入 full 代表打完整包,输入 exe 代表打可执行文件): ") if s == "": continue if s in ["full", "exe"]: is_full = {"full":True, "exe":False}[s] break if is_full: while True: group = input("请输入服务器组,用于各监听端口的最后一位数,有效值为 0 - 9: ") if len(group) == 1 and group.isdigit(): break while True: protocol = input("游戏客户端和服务器走什么协议?请输入 ws 或 wss : ") if protocol in ("ws", "wss"): break host = input("请输入目标服务器的外网 IP 或域名(直接回车则是 {}): ".format(DEFAULT_HOST)) if host == "": host = DEFAULT_HOST db = input("请输入mongodb的IP(直接回车则是 {}): ".format(DEFAULT_DB)) if db == "": db = DEFAULT_DB while True: is_all = input("打包服务器all表示全部[login 、hall、backstage、shop、statistics]其中一个): ") if is_all == "all" or is_all in DIR_LIST: break while True: s = input("是否压缩? (输入 y 或 n): ") if s == "": continue if s in ["y", "n"]: compress = {"y":True, "n":False}[s] break remove_dir(DIR_NAME) os.mkdir(DIR_NAME) build(DIR_NAME) gen_fingerprint_file(FINGERPRINT_FILE_NAME, NEED_HASH_DIR_LIST, branch_name) if is_full: server_list = [] if is_all =="all": server_list=DIR_LIST else: server_list=[is_all] deal_yaml_json_py_etc(DIR_NAME, server_list, is_full, group, protocol, host,db) if compress: package_zip(DIR_NAME, is_full) # 压缩 remove_dir(DIR_NAME) # 删除临时打包文件夹 if __name__ == "__main__": main()
build_windows_server.py
# 当前脚本只能在 Windows 下运行 # 可以使用 Python 3 的语法,Windows 下大家约定用 Python 3.7 以上 import os import shutil import subprocess import sys import hashlib import socket import time import build_linux_yaml_template as template NOW_TEXT = time.strftime("%Y-%m-%d_%H_%M_%S%z", time.localtime()) # 要保证有 2 位数来表示子游戏类型,只能 2 位,不能多不能少。 DIR_LIST = ["hall","login","backstage","shop","statistics"] DIR_NAME = "linux_server_" + NOW_TEXT# 临时打包目录,打包完后删除 EXECUTE_NAME = "chat_server.exe" # 打包出来的可执行文件名 NEED_HASH_DIR_LIST = [ # 后续优化: 不手工维护这个表了,自动递归全部目录就好了,发现 *.go 就 hash "execute", "hall", "login", "shop", "backstage", "mongo_init", "easygo", "for_game", "pb", "deleter", ] FINGERPRINT_FILE_NAME = DIR_NAME + "/fingerprint_{now}.txt".format(now=NOW_TEXT) # CONFIG_DIR_NAME = "config_package" DEFAULT_HOST = '192.168.50.27' DEFAULT_DB='192.168.50.27' # 检查有没有改了没有提交的或是新的 *.go 文件 def has_change_or_new_file(): code, s = subprocess.getstatusoutput("git status") if code != 0: raise Exception(s) return ".go" in s def build(dir_name): os.chdir("execute") print("准备编译可执行文件 {} ...".format(EXECUTE_NAME)) #text = "set CGO_ENABLED=0&&set GOOS=linux&&set GOARCH=amd64&&go build -o ../{dir_name}/{exe_name}".format(dir_name=dir_name, exe_name=EXECUTE_NAME) text = "go build -o ../{dir_name}/{exe_name}".format(dir_name=dir_name, exe_name=EXECUTE_NAME) code,s = subprocess.getstatusoutput(text) #必须在同一个线程运行 否则不成功 if code != 0: raise Exception(s) print("编译成功") os.chdir(".." 
) def deal_yaml_json_py_etc(dir_name, dir_list, is_full, group, protocol, host,db): # 打包linux版本文件夹 if not is_full: return os.chdir("execute") os.chdir("../%s"%dir_name) # 复制文件到当前目录 shutil.copy("../start.py", "./") shutil.copy("../stop_by_pid_file.py", "./") shutil.copy("../stop_by_grep.py", "./") shutil.copy("../backup.py", "./") shutil.copy("../deploy.py", "./") content = template.TEMPLATE_SHARE.format(group=group,center="127.0.0.1",db="127.0.0.1") with open("./config_share.yaml", "w", encoding="utf-8") as f: f.write(content) content = template.TEMPLATE_HALL_SECRET # 直接写,无需 format .format(group=group, host=host) with open("./config_hall_secret.yaml", "w", encoding="utf-8") as f: f.write(content) # os.mkdir(CONFIG_DIR_NAME) # os.chdir(CONFIG_DIR_NAME) # os.system('xcopy "../../cheat" "cheat" /s /e /i /y') # os.system('xcopy "../../config" "config" /s /e /i /y') # os.chdir("../") for dir in dir_list: #把配置文件复制到各个文件夹下 os.mkdir(dir) print("创建 %s\t子目录,并生成了 yaml 配置文件进去 "%(dir,)) os.chdir(dir) if dir == "hall": content = template.TEMPLATE_HALL.format(group=group, host=host) with open("./config_hall.yaml", "w", encoding="utf-8") as f: f.write(content) elif dir == "login": content = template.TEMPLATE_LOGIN.format(group=group, host=host) with open("./config_login.yaml", "w", encoding="utf-8") as f: f.write(content) elif dir == "shop": content = template.TEMPLATE_SHOP.format(group=group, host=host) with open("./config_shop.yaml", "w", encoding="utf-8") as f: f.write(content) elif dir == "backstage": content = template.TEMPLATE_BACKSTAGE.format(group=group, host=host) with open("./config_backstage.yaml", "w", encoding="utf-8") as f: f.write(content) shutil.copy("../../backstage/version.json", "./") shutil.copy("../../backstage/tfserver.json", "./") elif dir == "statistics": content = template.TEMPLATE_STATISTICS.format(group=group, host=host) with open("./config_statistics.yaml", "w", encoding="utf-8") as f: f.write(content) else: raise Exception("未知的目录 "+ dir) 
os.mkdir("logs") os.chdir("../") os.chdir("../") def package_zip(dir_name, is_full): # 把打包文件夹压缩成zip文件 print("开始压缩 %s 目录,耗时较长,耐心等候 ...... " %(dir_name,)) if is_full: t = "full" else: t = "execute" name = "%s_%s.zip" %(dir_name, t) text = "7z.exe -tZip a %s ./%s -mx9"%(name, dir_name) code, s = subprocess.getstatusoutput(text) if code != 0: text = "安装7z压缩软件了吗???设置7z的环境变量了吗???" raise Exception(text + s) print("压缩 OK,包名是 "+name) def remove_dir(dir_name): # 删除打包文件夹 if os.path.exists(dir_name): print("删除临时打包目录 "+ dir_name) shutil.rmtree(dir_name) def hash_file(file_name): # hash 出 md5 值 if not os.path.isfile(file_name): return myhash = hashlib.md5() with open(file_name,'rb') as f: while True: b = f.read(8096) if not b: break myhash.update(b) return myhash.hexdigest() def hash_all_file(dir_name): # 获取到所有当前路径下的文件 lst = [] for (root, dirs, files) in os.walk(dir_name): _ = dirs for file_name in files: s1 = hash_file(root+"\\"+file_name) s2 = "%s\\%s: %s\n" % (root,file_name, s1) lst.append(s2) return "".join(lst) def gen_fingerprint_file(fingerprint_file, need_hash_dir_list, branch_name): # 哈希 *.go 代码文件 if os.path.exists(fingerprint_file): # 检测如果有这个文件就删除新建 os.remove(fingerprint_file) with open(fingerprint_file,"a",encoding="utf8") as f: host_name = socket.gethostname() # 获取本机计算机名 f.write("计算机名: %s\n"%host_name) f.write("打包时间: %s\n" % NOW_TEXT) f.write("打包工作目录: %s\n" % os.getcwd()) f.write("打包分支名: {}\n".format(branch_name)) # 获取当前提交版本号 code, s = subprocess.getstatusoutput("git rev-parse HEAD") f.write("最后 Commit: %s\n" % s) if code != 0: raise Exception(s) # 获取当前环境 Golang 版本 code,s = subprocess.getstatusoutput("go version") if code != 0: raise Exception(s) f.write("打包机器 Golang 版本: %s" % s) f.write("\n") digest = hash_file("./{dir_name}/{exe_name}".format(dir_name=DIR_NAME, exe_name=EXECUTE_NAME)) f.write("可执行文件 {} MD5 值: {}\n".format(EXECUTE_NAME, digest)) f.write("\n各源代码文件 MD5 值:\n") for dir_name in need_hash_dir_list: # 循环遍历所有需要 hash 的目录 text = hash_all_file(dir_name) 
f.write(text) print("生成各 *.go 源码文件的 hash 值成功") def main(): code, branch_name = subprocess.getstatusoutput("git symbolic-ref --short -q HEAD") if code != 0: raise Exception(branch_name) if branch_name != "master": while True: q = input("严重警告!!!!!! 当前分支是 {},你真的要对这个分支而不是 master 进行打包 (输入 y 或 n): ".format(branch_name)) if q == "": continue elif q == 'y': break else: print("中止打包") return if has_change_or_new_file(): while True: q = input("严重警告!!!!!! 发现有新的或是改动未提交的 go 文件,是否仍要继续打包? (输入 y 或 n): ") if q == "": continue elif q == 'y': break else: print("中止打包") return while True: s = input("打完整包还是只打可执行文件?(输入 full 代表打完整包,输入 exe 代表打可执行文件): ") if s == "": continue if s in ["full", "exe"]: is_full = {"full":True, "exe":False}[s] break if is_full: while True: group = input("请输入服务器组,用于各监听端口的最后一位数,有效值为 0 - 9: ") if len(group) == 1 and group.isdigit(): break while True: protocol = input("游戏客户端和服务器走什么协议?请输入 ws 或 wss : ") if protocol in ("ws", "wss"): break host = input("请输入目标服务器的外网 IP 或域名(直接回车则是 {}): ".format(DEFAULT_HOST)) if host == "": host = DEFAULT_HOST db = input("请输入mongodb的IP(直接回车则是 {}): ".format(DEFAULT_DB)) if db == "": db = DEFAULT_DB while True: is_all = input("打包服务器all表示全部[login 、hall、backstage、shop、statistics]其中一个): ") if is_all == "all" or is_all in DIR_LIST: break while True: s = input("是否压缩? (输入 y 或 n): ") if s == "": continue if s in ["y", "n"]: compress = {"y":True, "n":False}[s] break remove_dir(DIR_NAME) os.mkdir(DIR_NAME) build(DIR_NAME) gen_fingerprint_file(FINGERPRINT_FILE_NAME, NEED_HASH_DIR_LIST, branch_name) if is_full: server_list = [] if is_all =="all": server_list=DIR_LIST else: server_list=[is_all] deal_yaml_json_py_etc(DIR_NAME, server_list, is_full, group, protocol, host,db) if compress: package_zip(DIR_NAME, is_full) # 压缩 remove_dir(DIR_NAME) # 删除临时打包文件夹 if __name__ == "__main__": main()
0.176743
0.092074
import sys import torch from pathlib import Path BASE_DIR = Path(__file__).resolve().parents[2] sys.path.append(str(BASE_DIR)) from torch.utils.data import Dataset from leaf.nlp_utils.tokenizer import Tokenizer class Sent140Dataset(Dataset): def __init__(self, client_id: int, client_str: str, data: list, targets: list, is_to_tokens=True, tokenizer=None): """get `Dataset` for sent140 dataset Args: client_id (int): client id client_str (str): client name string data (list): sentence list data targets (list): next-character target list is_to_tokens (bool, optional), if tokenize data by using tokenizer tokenizer (Tokenizer, optional), tokenizer """ self.client_id = client_id self.client_str = client_str self.data = data self.targets = targets self.data_token = [] self.data_seq = [] self.targets_tensor = [] self.vocab = None self.tokenizer = tokenizer if tokenizer else Tokenizer('normal') self.maxlen = None self._process_data_target() if is_to_tokens: self._data2token() def _process_data_target(self): """process client's data and target """ self.data = [e[4] for e in self.data] self.targets = torch.tensor(self.targets, dtype=torch.long) def _data2token(self): assert self.data is not None for sen in self.data: self.data_token.append(self.tokenizer(sen)) def token2seq(self, vocab: 'Vocab', maxlen: int): """transform token data to indices sequence by `vocab` Args: vocab (fedlab_benchmark.leaf.nlp_utils.vocab): vocab for data_token maxlen (int): max length of sentence Returns: list of integer list for data_token, and a list of tensor target """ if len(self.data_seq) > 0: self.data_seq.clear() self.targets_tensor.clear() self.vocab = vocab self.maxlen = maxlen assert self.data_token is not None for tokens in self.data_token: self.data_seq.append(self.__encode_tokens(tokens)) for target in self.targets: self.targets_tensor.append(torch.tensor(target)) def __encode_tokens(self, tokens) -> torch.Tensor: """encode `maxlen` length for token_data to get indices list in 
`self.vocab` if one sentence length is shorter than maxlen, it will use pad word for padding to maxlen if one sentence length is longer than maxlen, it will cut the first max_words words Args: tokens (list[str]): data after tokenizer Returns: integer list of indices with `maxlen` length for tokens input """ pad_word = 0 x = [pad_word for _ in range(self.maxlen)] temp = tokens[:self.maxlen] for idx, word in enumerate(temp): x[idx] = self.vocab.get_index(word) return torch.tensor(x) def __len__(self): return len(self.targets_tensor) def __getitem__(self, item): return self.data_seq[item], self.targets_tensor[item]
fedlab_benchmarks/leaf/dataset/sent140_dataset.py
import sys import torch from pathlib import Path BASE_DIR = Path(__file__).resolve().parents[2] sys.path.append(str(BASE_DIR)) from torch.utils.data import Dataset from leaf.nlp_utils.tokenizer import Tokenizer class Sent140Dataset(Dataset): def __init__(self, client_id: int, client_str: str, data: list, targets: list, is_to_tokens=True, tokenizer=None): """get `Dataset` for sent140 dataset Args: client_id (int): client id client_str (str): client name string data (list): sentence list data targets (list): next-character target list is_to_tokens (bool, optional), if tokenize data by using tokenizer tokenizer (Tokenizer, optional), tokenizer """ self.client_id = client_id self.client_str = client_str self.data = data self.targets = targets self.data_token = [] self.data_seq = [] self.targets_tensor = [] self.vocab = None self.tokenizer = tokenizer if tokenizer else Tokenizer('normal') self.maxlen = None self._process_data_target() if is_to_tokens: self._data2token() def _process_data_target(self): """process client's data and target """ self.data = [e[4] for e in self.data] self.targets = torch.tensor(self.targets, dtype=torch.long) def _data2token(self): assert self.data is not None for sen in self.data: self.data_token.append(self.tokenizer(sen)) def token2seq(self, vocab: 'Vocab', maxlen: int): """transform token data to indices sequence by `vocab` Args: vocab (fedlab_benchmark.leaf.nlp_utils.vocab): vocab for data_token maxlen (int): max length of sentence Returns: list of integer list for data_token, and a list of tensor target """ if len(self.data_seq) > 0: self.data_seq.clear() self.targets_tensor.clear() self.vocab = vocab self.maxlen = maxlen assert self.data_token is not None for tokens in self.data_token: self.data_seq.append(self.__encode_tokens(tokens)) for target in self.targets: self.targets_tensor.append(torch.tensor(target)) def __encode_tokens(self, tokens) -> torch.Tensor: """encode `maxlen` length for token_data to get indices list in 
`self.vocab` if one sentence length is shorter than maxlen, it will use pad word for padding to maxlen if one sentence length is longer than maxlen, it will cut the first max_words words Args: tokens (list[str]): data after tokenizer Returns: integer list of indices with `maxlen` length for tokens input """ pad_word = 0 x = [pad_word for _ in range(self.maxlen)] temp = tokens[:self.maxlen] for idx, word in enumerate(temp): x[idx] = self.vocab.get_index(word) return torch.tensor(x) def __len__(self): return len(self.targets_tensor) def __getitem__(self, item): return self.data_seq[item], self.targets_tensor[item]
0.751283
0.380442
from gemd.entity.object.base_object import BaseObject from gemd.entity.object.has_conditions import HasConditions from gemd.entity.object.has_parameters import HasParameters from gemd.entity.object.has_source import HasSource from gemd.entity.setters import validate_list class ProcessRun(BaseObject, HasConditions, HasParameters, HasSource): """ A process run. Processes transform zero or more input materials into exactly one output material. This includes links to conditions and parameters under which the process was performed, as well as soft links to the output material and each of the input ingredients. Parameters ---------- name: str, optional Name of the process run. uids: Map[str, str], optional A collection of `unique IDs <https://citrineinformatics.github.io/gemd-documentation/ specification/unique-identifiers/>`_. tags: List[str], optional `Tags <https://citrineinformatics.github.io/gemd-documentation/specification/tags/>`_ are hierarchical strings that store information about an entity. They can be used for filtering and discoverability. notes: str, optional Long-form notes about the process run. conditions: List[:class:`Condition <gemd.entity.attribute.condition.Condition>`], optional Conditions under which this process run occurs. parameters: List[:class:`Parameter <gemd.entity.attribute.parameter.Parameter>`], optional Parameters of this process run. spec: :class:`ProcessSpec <gemd.entity.object.process_spec.ProcessSpec>` Spec for this process run. file_links: List[:class:`FileLink <gemd.entity.file_link.FileLink>`], optional Links to associated files, with resource paths into the files API. source: :class:`PerformedSource\ <gemd.entity.source.performed_source.PerformedSource>`, optional Information about the person who performed the run and when. Attributes ---------- output_material: :class:`MaterialRun <gemd.entity.object.material_run.MaterialRun>` The material run that this process run produces. 
The link is established by creating the material run and settings its `process` field to this process run. ingredients: List[:class:`IngredientRun <gemd.entity.object.ingredient_run.IngredientRun>`] Ingredient runs that act as inputs to this process run. The link is established by creating each ingredient run and setting its `process` field to this process run. """ typ = "process_run" skip = {"_output_material", "_ingredients"} def __init__(self, name, *, spec=None, conditions=None, parameters=None, uids=None, tags=None, notes=None, file_links=None, source=None): from gemd.entity.object.ingredient_run import IngredientRun from gemd.entity.link_by_uid import LinkByUID BaseObject.__init__(self, name=name, uids=uids, tags=tags, notes=notes, file_links=file_links) HasConditions.__init__(self, conditions) HasParameters.__init__(self, parameters) HasSource.__init__(self, source) self._spec = None self.spec = spec self._output_material = None self._ingredients = validate_list(None, [IngredientRun, LinkByUID]) @property def output_material(self): """Get the output material run.""" return self._output_material @property def ingredients(self): """Get the input ingredient runs.""" return self._ingredients @property def spec(self): """Get the process spec.""" return self._spec @spec.setter def spec(self, spec): from gemd.entity.object.process_spec import ProcessSpec from gemd.entity.link_by_uid import LinkByUID if spec is None: self._spec = None elif isinstance(spec, (ProcessSpec, LinkByUID)): self._spec = spec else: raise TypeError("spec must be a ProcessSpec or LinkByUID: {}".format(spec)) @property def template(self): """Get the template of the spec, if applicable.""" from gemd.entity.object.process_spec import ProcessSpec if isinstance(self.spec, ProcessSpec): return self.spec.template else: return None def _dict_for_compare(self): """Support for recursive equals.""" base = super()._dict_for_compare() base['ingredients'] = self.ingredients return base
gemd/entity/object/process_run.py
from gemd.entity.object.base_object import BaseObject from gemd.entity.object.has_conditions import HasConditions from gemd.entity.object.has_parameters import HasParameters from gemd.entity.object.has_source import HasSource from gemd.entity.setters import validate_list class ProcessRun(BaseObject, HasConditions, HasParameters, HasSource): """ A process run. Processes transform zero or more input materials into exactly one output material. This includes links to conditions and parameters under which the process was performed, as well as soft links to the output material and each of the input ingredients. Parameters ---------- name: str, optional Name of the process run. uids: Map[str, str], optional A collection of `unique IDs <https://citrineinformatics.github.io/gemd-documentation/ specification/unique-identifiers/>`_. tags: List[str], optional `Tags <https://citrineinformatics.github.io/gemd-documentation/specification/tags/>`_ are hierarchical strings that store information about an entity. They can be used for filtering and discoverability. notes: str, optional Long-form notes about the process run. conditions: List[:class:`Condition <gemd.entity.attribute.condition.Condition>`], optional Conditions under which this process run occurs. parameters: List[:class:`Parameter <gemd.entity.attribute.parameter.Parameter>`], optional Parameters of this process run. spec: :class:`ProcessSpec <gemd.entity.object.process_spec.ProcessSpec>` Spec for this process run. file_links: List[:class:`FileLink <gemd.entity.file_link.FileLink>`], optional Links to associated files, with resource paths into the files API. source: :class:`PerformedSource\ <gemd.entity.source.performed_source.PerformedSource>`, optional Information about the person who performed the run and when. Attributes ---------- output_material: :class:`MaterialRun <gemd.entity.object.material_run.MaterialRun>` The material run that this process run produces. 
The link is established by creating the material run and settings its `process` field to this process run. ingredients: List[:class:`IngredientRun <gemd.entity.object.ingredient_run.IngredientRun>`] Ingredient runs that act as inputs to this process run. The link is established by creating each ingredient run and setting its `process` field to this process run. """ typ = "process_run" skip = {"_output_material", "_ingredients"} def __init__(self, name, *, spec=None, conditions=None, parameters=None, uids=None, tags=None, notes=None, file_links=None, source=None): from gemd.entity.object.ingredient_run import IngredientRun from gemd.entity.link_by_uid import LinkByUID BaseObject.__init__(self, name=name, uids=uids, tags=tags, notes=notes, file_links=file_links) HasConditions.__init__(self, conditions) HasParameters.__init__(self, parameters) HasSource.__init__(self, source) self._spec = None self.spec = spec self._output_material = None self._ingredients = validate_list(None, [IngredientRun, LinkByUID]) @property def output_material(self): """Get the output material run.""" return self._output_material @property def ingredients(self): """Get the input ingredient runs.""" return self._ingredients @property def spec(self): """Get the process spec.""" return self._spec @spec.setter def spec(self, spec): from gemd.entity.object.process_spec import ProcessSpec from gemd.entity.link_by_uid import LinkByUID if spec is None: self._spec = None elif isinstance(spec, (ProcessSpec, LinkByUID)): self._spec = spec else: raise TypeError("spec must be a ProcessSpec or LinkByUID: {}".format(spec)) @property def template(self): """Get the template of the spec, if applicable.""" from gemd.entity.object.process_spec import ProcessSpec if isinstance(self.spec, ProcessSpec): return self.spec.template else: return None def _dict_for_compare(self): """Support for recursive equals.""" base = super()._dict_for_compare() base['ingredients'] = self.ingredients return base
0.911962
0.395718
import requests import os import locale import click import json from sys import exit from importlib import import_module from subprocess import call from functools import partial # HELPER FUNCTIONS def _localization(): '''Returns a module object containing all the help strings according to the locale of the user''' try: locale.setlocale(locale.LC_ALL, '') return import_module('.locales.{}'.format(locale.getlocale()[0]), package='gitman') except ImportError: return import_module('.locales.en_US', package='gitman') loc = _localization() def api_call(method, token=None, verb=None): '''Sends an api call to github.com and returns the json contained. Args: method (str): Represents the URI to query for data. token (str or None): If provided, provides authorization for queries. Returns: tuple: Status code of the query and the json contained in the response. ''' if token: token = {'Authorization': 'token {}'.format(token)} data = verb('https://api.github.com/{}'.format(method), headers=token) try: return data.status_code, data.json() except json.decoder.JSONDecodeError: click.echo(loc.API_ERROR) exit(1) api_get = partial(api_call, verb=requests.get) api_post = partial(api_call, verb=requests.post) # END HELPER FUNCTIONS @click.group(help=loc.DESCRIPTION) @click.pass_context def main(ctx): '''Entry point.''' if not os.path.exists(os.path.expanduser('~/.gitman')): _git_setup() with open(os.path.join(os.environ['HOME'], '.gitman'), 'r') as cf: ctx.obj = cf.readline() @main.command(name='list', help=loc.LIST_HELP) @click.pass_obj @click.argument('user', nargs=1, required=False) def git_list(token, user): '''Lists the repositories of a user. Args: token (str): To provide authorization for the call if needed. user (str): User to target in Github. 
Outputs: The list of the repositories belonging to the user to stdin.''' if user: status_code, repos_data = api_get('users/{}/repos'.format(user)) else: status_code, repos_data = api_get('user/repos', token=token) if status_code == 200: click.echo(loc.LIST_USERNAME.format(repos_data[0]['owner']['login'])) for repo in repos_data: click.echo('* {} - {}'.format(repo['full_name'], repo['description'])) else: click.echo(loc.LIST_NOTFOUND.format(user)) def _git_setup(): '''Stores the token provided by the user for future use. Inputs: A string from stdout representing the Github OAuth token of the caller. Required at least the "repos" permission, for now. Outputs: A file ".gitman" on the $HOME of the user containing this token string. TODO: - Maybe allow the user to delete his own repositories if given a valid token. ''' with open(os.path.expanduser('~/.gitman'), 'w') as cf: click.echo(loc.SETUP_INSTRUCTIONS) token = click.prompt(loc.SETUP_INPUT, type=str) if click.confirm(loc.SETUP_CONFIRM, abort=True): cf.write(token) @main.command(name='setup', help=loc.SETUP_HELP) def git_setup(): '''CLI wrapper for _git_setup.''' _git_setup() @main.command(name='clone', help=loc.CLONE_HELP) @click.pass_obj @click.argument('repo', metavar='[USERNAME/]REPO') def git_clone(token, repo): '''Clones a given repository. Args: token (str): Always passed by the main software, serves to identify the user in case it is not specified in the repo name. repo (str): Name of the repository to clone, in the format <username?>/<repo_name>. Outputs: Calls 'git' and clones the specified repository in the CWD. 
''' if len(repo.split('/')) == 1: _, user_data = api_get('user', token) repo = '{}/{}'.format(user_data['login'], repo) try: call(['git', 'clone', 'https://github.com/{}'.format(repo)]) except FileNotFoundError: click.echo(loc.CLONE_NOTFOUND) @main.command(name='fork', help=loc.FORK_HELP) @click.pass_obj @click.argument('repo', metavar='USERNAME/REPO') def git_fork(token, repo): '''Forks the repository of a user into the authenticated user. Args: token (str): Passed down by the main software, necessary to ID the user requesting the fork. repo (str): In the format <username>/<repository>, targets the repo to fork. ''' repo_data = repo.split('/') if len(repo_data) != 2: click.echo(loc.FORK_SYNTAXERROR) exit(1) status_code, result = api_post('repos/{0}/forks'.format(repo), token) if status_code == 202 and result['owner']['login'] != repo_data[0]: click.echo(loc.FORK_SUCCESS.format(result['full_name'], repo)) elif result['owner']['login'] == repo_data[0]: click.echo(loc.FORK_SELFERROR) else: click.echo(loc.FORK_NOTFOUND.format(repo)) if __name__ == '__main__': main()
venv/lib/python3.7/site-packages/gitman/gitman.py
import requests import os import locale import click import json from sys import exit from importlib import import_module from subprocess import call from functools import partial # HELPER FUNCTIONS def _localization(): '''Returns a module object containing all the help strings according to the locale of the user''' try: locale.setlocale(locale.LC_ALL, '') return import_module('.locales.{}'.format(locale.getlocale()[0]), package='gitman') except ImportError: return import_module('.locales.en_US', package='gitman') loc = _localization() def api_call(method, token=None, verb=None): '''Sends an api call to github.com and returns the json contained. Args: method (str): Represents the URI to query for data. token (str or None): If provided, provides authorization for queries. Returns: tuple: Status code of the query and the json contained in the response. ''' if token: token = {'Authorization': 'token {}'.format(token)} data = verb('https://api.github.com/{}'.format(method), headers=token) try: return data.status_code, data.json() except json.decoder.JSONDecodeError: click.echo(loc.API_ERROR) exit(1) api_get = partial(api_call, verb=requests.get) api_post = partial(api_call, verb=requests.post) # END HELPER FUNCTIONS @click.group(help=loc.DESCRIPTION) @click.pass_context def main(ctx): '''Entry point.''' if not os.path.exists(os.path.expanduser('~/.gitman')): _git_setup() with open(os.path.join(os.environ['HOME'], '.gitman'), 'r') as cf: ctx.obj = cf.readline() @main.command(name='list', help=loc.LIST_HELP) @click.pass_obj @click.argument('user', nargs=1, required=False) def git_list(token, user): '''Lists the repositories of a user. Args: token (str): To provide authorization for the call if needed. user (str): User to target in Github. 
Outputs: The list of the repositories belonging to the user to stdin.''' if user: status_code, repos_data = api_get('users/{}/repos'.format(user)) else: status_code, repos_data = api_get('user/repos', token=token) if status_code == 200: click.echo(loc.LIST_USERNAME.format(repos_data[0]['owner']['login'])) for repo in repos_data: click.echo('* {} - {}'.format(repo['full_name'], repo['description'])) else: click.echo(loc.LIST_NOTFOUND.format(user)) def _git_setup(): '''Stores the token provided by the user for future use. Inputs: A string from stdout representing the Github OAuth token of the caller. Required at least the "repos" permission, for now. Outputs: A file ".gitman" on the $HOME of the user containing this token string. TODO: - Maybe allow the user to delete his own repositories if given a valid token. ''' with open(os.path.expanduser('~/.gitman'), 'w') as cf: click.echo(loc.SETUP_INSTRUCTIONS) token = click.prompt(loc.SETUP_INPUT, type=str) if click.confirm(loc.SETUP_CONFIRM, abort=True): cf.write(token) @main.command(name='setup', help=loc.SETUP_HELP) def git_setup(): '''CLI wrapper for _git_setup.''' _git_setup() @main.command(name='clone', help=loc.CLONE_HELP) @click.pass_obj @click.argument('repo', metavar='[USERNAME/]REPO') def git_clone(token, repo): '''Clones a given repository. Args: token (str): Always passed by the main software, serves to identify the user in case it is not specified in the repo name. repo (str): Name of the repository to clone, in the format <username?>/<repo_name>. Outputs: Calls 'git' and clones the specified repository in the CWD. 
''' if len(repo.split('/')) == 1: _, user_data = api_get('user', token) repo = '{}/{}'.format(user_data['login'], repo) try: call(['git', 'clone', 'https://github.com/{}'.format(repo)]) except FileNotFoundError: click.echo(loc.CLONE_NOTFOUND) @main.command(name='fork', help=loc.FORK_HELP) @click.pass_obj @click.argument('repo', metavar='USERNAME/REPO') def git_fork(token, repo): '''Forks the repository of a user into the authenticated user. Args: token (str): Passed down by the main software, necessary to ID the user requesting the fork. repo (str): In the format <username>/<repository>, targets the repo to fork. ''' repo_data = repo.split('/') if len(repo_data) != 2: click.echo(loc.FORK_SYNTAXERROR) exit(1) status_code, result = api_post('repos/{0}/forks'.format(repo), token) if status_code == 202 and result['owner']['login'] != repo_data[0]: click.echo(loc.FORK_SUCCESS.format(result['full_name'], repo)) elif result['owner']['login'] == repo_data[0]: click.echo(loc.FORK_SELFERROR) else: click.echo(loc.FORK_NOTFOUND.format(repo)) if __name__ == '__main__': main()
0.396419
0.110064
import sys from wordcloud import WordCloud,ImageColorGenerator from collections import Counter import jieba.posseg as psg import matplotlib.pyplot as plt # 对文本分词并标注词性,并缓存到文件 def cut_and_cache(text): # 将文本分词,并附带上词性,因为数据量比较大,防止每次运行脚本都花大量时间,所以第一次分词后就将结果存入文件cut_result.txt中 # 相当于做一个缓存,格式为每个词占一行,每一行的内容为: # 词,词性 words_with_attr = [(x.word, x.flag) for x in psg.cut(text) if len(x.word) >= 2] with open('cut_result.txt', 'w+') as f: for x in words_with_attr: f.write('{0}\t{1}\n'.format(x[0], x[1])) return words_with_attr # 从cut_result.txt中读取带词性的分词结果列表 def read_cut_result(): words_with_attr = [] with open('cut_result.txt', 'r') as f: for x in f.readlines(): # 这里解码成utf-8格式,是为了防止后面生成词云的时候出现乱码 x = x.decode('utf-8') pair = x.split() if len(pair) < 2: continue words_with_attr.append((pair[0], pair[1])) return words_with_attr #  统计在分词表中出现次数排名前topn的词的列表,并将结果输出到文件topn_words.txt中,每行一个词,格式为: # 词,出现次数 def get_topn_words(words, topn): c = Counter(words).most_common(topn) top_words_with_freq = {} with open('top{0}_words.txt'.format(topn), 'w+') as f: for x in c: f.write('{0},{1}\n'.format(x[0], x[1])) top_words_with_freq[x[0]] = x[1] return top_words_with_freq # 传入文本文件的路径file_path和topn,获取文本文件中topn关键词列表及词频 def get_top_words(file_path, topn): # 读取文本文件,然后分词并缓存,只需运行一次,后续运行脚本可注释掉下面两行 # text = open(file_path).read() # words_with_attr = cut_and_cache(text) # 从cut_result.txt中读取带词性的分词结果列表 words_with_attr = read_cut_result() # 要过滤掉的词性列表 stop_attr = ['a', 'ad', 'b', 'c', 'd', 'f', 'df', 'm', 'mq', 'p', 'r', 'rr', 's', 't', 'u', 'v', 'z'] # 过滤掉不需要的词性的词 words = [x[0] for x in words_with_attr if x[1] not in stop_attr] # 获取topn的词并存入文件topn_words.txt,top_words_with_freq为一个字典,在生成词云的时候会用到,格式为: # {'aa':1002,'bb':879,'cc':456} top_words_with_freq = get_topn_words(words=words, topn=topn) return top_words_with_freq # 根据传入的背景图片路径和词频字典、字体文件,生成指定名称的词云图片 def generate_word_cloud(top_words_with_freq, font_path, to_save_img_path, background_color='white'): # 创建词云对象 wc = WordCloud(font_path=font_path, # 设置字体 
background_color=background_color, # 词云图片的背景颜色,默认为白色 max_words=100, # 最大显示词数为100 max_font_size=80, # 字体最大字号 random_state=50, # 字体的最多模式 width=500, # 词云图片宽度 margin=2, # 词与词之间的间距 height=300) # 词云图片高度 # 用top_words_with_freq生成词云内容 wc.generate_from_frequencies(top_words_with_freq) # 用matplotlib绘出词云图片显示出来 plt.imshow(wc) plt.axis('off') plt.show() # 将词云图片保存成图片 wc.to_file(to_save_img_path) def main(): # 设置环境为utf-8编码格式,防止处理中文出错 reload(sys) sys.setdefaultencoding('utf-8') # 获取topn词汇的'词:词频'字典,santi.txt是当前目录下的文本文件 top_words_with_freq = get_top_words('./merge.txt', 100) # 生成词云图片,yahei.ttf是当前目录下微软雅黑字体文件,wordcloud.png是要生成的词云图片名 generate_word_cloud(top_words_with_freq, './yahei.ttf', './wordcloud.png') print 'all is finished!' if __name__ == '__main__': main()
libs/wordcloud/ai/word_cloud.py
import sys from wordcloud import WordCloud,ImageColorGenerator from collections import Counter import jieba.posseg as psg import matplotlib.pyplot as plt # 对文本分词并标注词性,并缓存到文件 def cut_and_cache(text): # 将文本分词,并附带上词性,因为数据量比较大,防止每次运行脚本都花大量时间,所以第一次分词后就将结果存入文件cut_result.txt中 # 相当于做一个缓存,格式为每个词占一行,每一行的内容为: # 词,词性 words_with_attr = [(x.word, x.flag) for x in psg.cut(text) if len(x.word) >= 2] with open('cut_result.txt', 'w+') as f: for x in words_with_attr: f.write('{0}\t{1}\n'.format(x[0], x[1])) return words_with_attr # 从cut_result.txt中读取带词性的分词结果列表 def read_cut_result(): words_with_attr = [] with open('cut_result.txt', 'r') as f: for x in f.readlines(): # 这里解码成utf-8格式,是为了防止后面生成词云的时候出现乱码 x = x.decode('utf-8') pair = x.split() if len(pair) < 2: continue words_with_attr.append((pair[0], pair[1])) return words_with_attr #  统计在分词表中出现次数排名前topn的词的列表,并将结果输出到文件topn_words.txt中,每行一个词,格式为: # 词,出现次数 def get_topn_words(words, topn): c = Counter(words).most_common(topn) top_words_with_freq = {} with open('top{0}_words.txt'.format(topn), 'w+') as f: for x in c: f.write('{0},{1}\n'.format(x[0], x[1])) top_words_with_freq[x[0]] = x[1] return top_words_with_freq # 传入文本文件的路径file_path和topn,获取文本文件中topn关键词列表及词频 def get_top_words(file_path, topn): # 读取文本文件,然后分词并缓存,只需运行一次,后续运行脚本可注释掉下面两行 # text = open(file_path).read() # words_with_attr = cut_and_cache(text) # 从cut_result.txt中读取带词性的分词结果列表 words_with_attr = read_cut_result() # 要过滤掉的词性列表 stop_attr = ['a', 'ad', 'b', 'c', 'd', 'f', 'df', 'm', 'mq', 'p', 'r', 'rr', 's', 't', 'u', 'v', 'z'] # 过滤掉不需要的词性的词 words = [x[0] for x in words_with_attr if x[1] not in stop_attr] # 获取topn的词并存入文件topn_words.txt,top_words_with_freq为一个字典,在生成词云的时候会用到,格式为: # {'aa':1002,'bb':879,'cc':456} top_words_with_freq = get_topn_words(words=words, topn=topn) return top_words_with_freq # 根据传入的背景图片路径和词频字典、字体文件,生成指定名称的词云图片 def generate_word_cloud(top_words_with_freq, font_path, to_save_img_path, background_color='white'): # 创建词云对象 wc = WordCloud(font_path=font_path, # 设置字体 
background_color=background_color, # 词云图片的背景颜色,默认为白色 max_words=100, # 最大显示词数为100 max_font_size=80, # 字体最大字号 random_state=50, # 字体的最多模式 width=500, # 词云图片宽度 margin=2, # 词与词之间的间距 height=300) # 词云图片高度 # 用top_words_with_freq生成词云内容 wc.generate_from_frequencies(top_words_with_freq) # 用matplotlib绘出词云图片显示出来 plt.imshow(wc) plt.axis('off') plt.show() # 将词云图片保存成图片 wc.to_file(to_save_img_path) def main(): # 设置环境为utf-8编码格式,防止处理中文出错 reload(sys) sys.setdefaultencoding('utf-8') # 获取topn词汇的'词:词频'字典,santi.txt是当前目录下的文本文件 top_words_with_freq = get_top_words('./merge.txt', 100) # 生成词云图片,yahei.ttf是当前目录下微软雅黑字体文件,wordcloud.png是要生成的词云图片名 generate_word_cloud(top_words_with_freq, './yahei.ttf', './wordcloud.png') print 'all is finished!' if __name__ == '__main__': main()
0.133049
0.379953
import pytest from unittest.mock import patch import logging from pyvesync_v2 import VeSync, VeSyncAir131 from pyvesync_v2.helpers import Helpers as helpers from . import call_json DEV_LIST_DETAIL = call_json.LIST_CONF_AIR CORRECT_LIST = call_json.DEVLIST_AIR ENERGY_HISTORY = call_json.ENERGY_HISTORY CORRECT_DETAILS = call_json.DETAILS_AIR BAD_LIST = call_json.DETAILS_BADCODE class TestVesyncAirPurifier(object): """Air purifier tests.""" @pytest.fixture() def api_mock(self, caplog): """Mock call_api and initialize VeSync object.""" self.mock_api_call = patch('pyvesync_v2.helpers.Helpers.call_api') self.mock_api = self.mock_api_call.start() self.mock_api.create_autospect() self.mock_api.return_value.ok = True self.vesync_obj = VeSync('<EMAIL>', 'pass') self.vesync_obj.enabled = True self.vesync_obj.login = True self.vesync_obj.token = '<PASSWORD>' self.vesync_obj.account_id = 'sample_actid' caplog.set_level(logging.DEBUG) yield self.mock_api_call.stop() def test_airpur_conf(self, api_mock): """Tests that 15A Outlet is instantiated properly.""" self.mock_api.return_value = CORRECT_LIST fans = self.vesync_obj.get_devices() fan = fans[2] assert len(fan) == 1 fan = fan[0] assert isinstance(fan, VeSyncAir131) assert fan.device_name == "Name Air Purifier" assert fan.device_type == "LV-PUR131S" assert fan.cid == "AIRPUR-CID" assert fan.uuid == "UUID" def test_airpur_details(self, api_mock): """Test 15A get_details().""" self.mock_api.return_value = CORRECT_DETAILS fan = VeSyncAir131(DEV_LIST_DETAIL, self.vesync_obj) fan.get_details() dev_details = fan.details assert fan.device_status == 'on' assert type(dev_details) == dict assert dev_details['active_time'] == 1 assert fan.filter_life == 100 assert dev_details['screen_status'] == 'on' assert fan.mode == 'manual' assert dev_details['level'] == 1 assert fan.fan_level == 1 assert dev_details['air_quality'] == 'excellent' assert fan.air_quality == 'excellent' def test_airpur_details_fail(self, caplog, api_mock): """Test Air 
Purifier get_details with Code>0.""" self.mock_api.return_value = BAD_LIST fan = VeSyncAir131(DEV_LIST_DETAIL, self.vesync_obj) fan.get_details() assert len(caplog.records) == 2 assert 'details' in caplog.text def test_airpur_onoff(self, caplog, api_mock): """Test Air Purifier Device On/Off Methods.""" self.mock_api.return_value = ({"code": 0}, 200) fan = VeSyncAir131(DEV_LIST_DETAIL, self.vesync_obj) head = helpers.req_headers(self.vesync_obj) body = helpers.req_body(self.vesync_obj, 'devicestatus') fan.device_status = 'off' body['status'] = 'on' body['uuid'] = fan.uuid on = fan.turn_on() self.mock_api.assert_called_with( '/131airPurifier/v1/device/deviceStatus', 'put', json=body, headers=head) call_args = self.mock_api.call_args_list[0][0] assert call_args[0] == '/131airPurifier/v1/device/deviceStatus' assert call_args[1] == 'put' assert on fan.device_status = 'on' off = fan.turn_off() body['status'] = 'off' self.mock_api.assert_called_with( '/131airPurifier/v1/device/deviceStatus', 'put', json=body, headers=head) assert off def test_airpur_onoff_fail(self, api_mock): """Test Air Purifier On/Off Fail with Code>0.""" self.mock_api.return_value = ({"code": 1}, 400) vswitch15a = VeSyncAir131(DEV_LIST_DETAIL, self.vesync_obj) assert not vswitch15a.turn_on() assert not vswitch15a.turn_off() def test_airpur_fanspeed(self, caplog, api_mock): """Test changing fan speed of.""" self.mock_api.return_value = ({'code': 0}, 200) fan = VeSyncAir131(DEV_LIST_DETAIL, self.vesync_obj) fan.details['level'] = 1 b = fan.change_fan_speed() assert fan.fan_level == 2 b = fan.change_fan_speed() assert fan.fan_level == 3 b = fan.change_fan_speed() assert fan.fan_level == 1 assert b b = fan.change_fan_speed(2) assert b assert fan.fan_level == 2 def test_mode_toggle(self, caplog, api_mock): """Test changing modes on air purifier.""" self.mock_api.return_value = ({'code': 0}, 200) fan = VeSyncAir131(DEV_LIST_DETAIL, self.vesync_obj) f = fan.auto_mode() assert f assert fan.mode == 'auto' f = 
fan.manual_mode() assert fan.mode == 'manual' assert f f = fan.sleep_mode() assert fan.mode == 'sleep' assert f
src/tests/test_air_pur.py
import pytest from unittest.mock import patch import logging from pyvesync_v2 import VeSync, VeSyncAir131 from pyvesync_v2.helpers import Helpers as helpers from . import call_json DEV_LIST_DETAIL = call_json.LIST_CONF_AIR CORRECT_LIST = call_json.DEVLIST_AIR ENERGY_HISTORY = call_json.ENERGY_HISTORY CORRECT_DETAILS = call_json.DETAILS_AIR BAD_LIST = call_json.DETAILS_BADCODE class TestVesyncAirPurifier(object): """Air purifier tests.""" @pytest.fixture() def api_mock(self, caplog): """Mock call_api and initialize VeSync object.""" self.mock_api_call = patch('pyvesync_v2.helpers.Helpers.call_api') self.mock_api = self.mock_api_call.start() self.mock_api.create_autospect() self.mock_api.return_value.ok = True self.vesync_obj = VeSync('<EMAIL>', 'pass') self.vesync_obj.enabled = True self.vesync_obj.login = True self.vesync_obj.token = '<PASSWORD>' self.vesync_obj.account_id = 'sample_actid' caplog.set_level(logging.DEBUG) yield self.mock_api_call.stop() def test_airpur_conf(self, api_mock): """Tests that 15A Outlet is instantiated properly.""" self.mock_api.return_value = CORRECT_LIST fans = self.vesync_obj.get_devices() fan = fans[2] assert len(fan) == 1 fan = fan[0] assert isinstance(fan, VeSyncAir131) assert fan.device_name == "Name Air Purifier" assert fan.device_type == "LV-PUR131S" assert fan.cid == "AIRPUR-CID" assert fan.uuid == "UUID" def test_airpur_details(self, api_mock): """Test 15A get_details().""" self.mock_api.return_value = CORRECT_DETAILS fan = VeSyncAir131(DEV_LIST_DETAIL, self.vesync_obj) fan.get_details() dev_details = fan.details assert fan.device_status == 'on' assert type(dev_details) == dict assert dev_details['active_time'] == 1 assert fan.filter_life == 100 assert dev_details['screen_status'] == 'on' assert fan.mode == 'manual' assert dev_details['level'] == 1 assert fan.fan_level == 1 assert dev_details['air_quality'] == 'excellent' assert fan.air_quality == 'excellent' def test_airpur_details_fail(self, caplog, api_mock): """Test Air 
Purifier get_details with Code>0.""" self.mock_api.return_value = BAD_LIST fan = VeSyncAir131(DEV_LIST_DETAIL, self.vesync_obj) fan.get_details() assert len(caplog.records) == 2 assert 'details' in caplog.text def test_airpur_onoff(self, caplog, api_mock): """Test Air Purifier Device On/Off Methods.""" self.mock_api.return_value = ({"code": 0}, 200) fan = VeSyncAir131(DEV_LIST_DETAIL, self.vesync_obj) head = helpers.req_headers(self.vesync_obj) body = helpers.req_body(self.vesync_obj, 'devicestatus') fan.device_status = 'off' body['status'] = 'on' body['uuid'] = fan.uuid on = fan.turn_on() self.mock_api.assert_called_with( '/131airPurifier/v1/device/deviceStatus', 'put', json=body, headers=head) call_args = self.mock_api.call_args_list[0][0] assert call_args[0] == '/131airPurifier/v1/device/deviceStatus' assert call_args[1] == 'put' assert on fan.device_status = 'on' off = fan.turn_off() body['status'] = 'off' self.mock_api.assert_called_with( '/131airPurifier/v1/device/deviceStatus', 'put', json=body, headers=head) assert off def test_airpur_onoff_fail(self, api_mock): """Test Air Purifier On/Off Fail with Code>0.""" self.mock_api.return_value = ({"code": 1}, 400) vswitch15a = VeSyncAir131(DEV_LIST_DETAIL, self.vesync_obj) assert not vswitch15a.turn_on() assert not vswitch15a.turn_off() def test_airpur_fanspeed(self, caplog, api_mock): """Test changing fan speed of.""" self.mock_api.return_value = ({'code': 0}, 200) fan = VeSyncAir131(DEV_LIST_DETAIL, self.vesync_obj) fan.details['level'] = 1 b = fan.change_fan_speed() assert fan.fan_level == 2 b = fan.change_fan_speed() assert fan.fan_level == 3 b = fan.change_fan_speed() assert fan.fan_level == 1 assert b b = fan.change_fan_speed(2) assert b assert fan.fan_level == 2 def test_mode_toggle(self, caplog, api_mock): """Test changing modes on air purifier.""" self.mock_api.return_value = ({'code': 0}, 200) fan = VeSyncAir131(DEV_LIST_DETAIL, self.vesync_obj) f = fan.auto_mode() assert f assert fan.mode == 'auto' f = 
fan.manual_mode() assert fan.mode == 'manual' assert f f = fan.sleep_mode() assert fan.mode == 'sleep' assert f
0.565299
0.268198
import uuid from flask import Blueprint, redirect, request, url_for, flash, abort from flask_login import login_required, current_user from flask_babelplus import gettext as _ from flaskbb.extensions import db from flaskbb.utils.settings import flaskbb_config from flaskbb.utils.helpers import render_template, format_quote, time_utcnow from flaskbb.message.forms import ConversationForm, MessageForm from flaskbb.message.models import Conversation, Message from flaskbb.user.models import User message = Blueprint("message", __name__) @message.route("/") @message.route("/inbox") @login_required def inbox(): page = request.args.get('page', 1, type=int) conversations = Conversation.query.\ filter( Conversation.id == Message.conversation_id, Conversation.user_id == current_user.id, Conversation.draft == False, Conversation.trash == False ).\ order_by(Message.date_created.desc()).\ paginate(page, flaskbb_config['TOPICS_PER_PAGE'], False) message_count = Conversation.query.\ filter(Conversation.user_id == current_user.id).\ count() return render_template("message/inbox.html", conversations=conversations, message_count=message_count) @message.route("/<int:conversation_id>/view", methods=["GET", "POST"]) def view_conversation(conversation_id): conversation = Conversation.query.filter_by( id=conversation_id, user_id=current_user.id ).first_or_404() if conversation.unread: conversation.unread = False current_user.invalidate_cache(permissions=False) conversation.save() form = MessageForm() if form.validate_on_submit(): message_count = Conversation.query.\ filter(Conversation.user_id == current_user.id).\ count() if message_count >= flaskbb_config["MESSAGE_QUOTA"]: flash(_("You cannot send any messages anymore because you have " "reached your message limit."), "danger") return redirect(url_for("message.view_conversation", conversation_id=conversation.id)) to_user_id = None # If the current_user is the user who recieved the message # then we have to change the id's a bit. 
if current_user.id == conversation.to_user_id: to_user_id = conversation.from_user_id else: to_user_id = conversation.to_user_id form.save(conversation=conversation, user_id=current_user.id) # save the message in the recievers conversation old_conv = conversation conversation = Conversation.query.\ filter( Conversation.user_id == to_user_id, Conversation.shared_id == conversation.shared_id ).first() # user deleted the conversation, start a new conversation with just # the recieving message if conversation is None: conversation = Conversation( subject=old_conv.subject, from_user_id=current_user.id, to_user=to_user_id, user_id=to_user_id, shared_id=old_conv.shared_id ) conversation.save() form.save(conversation=conversation, user_id=current_user.id, unread=True) conversation.to_user.invalidate_cache(permissions=False) return redirect(url_for("message.view_conversation", conversation_id=old_conv.id)) return render_template("message/conversation.html", conversation=conversation, form=form) @message.route("/new", methods=["POST", "GET"]) @login_required def new_conversation(): form = ConversationForm() to_user = request.args.get("to_user") message_count = Conversation.query.\ filter(Conversation.user_id == current_user.id).\ count() if message_count >= flaskbb_config["MESSAGE_QUOTA"]: flash(_("You cannot send any messages anymore because you have " "reached your message limit."), "danger") return redirect(url_for("message.inbox")) if request.method == "POST": if "save_message" in request.form and form.validate(): to_user = User.query.filter_by(username=form.to_user.data).first() shared_id = uuid.uuid4() form.save(from_user=current_user.id, to_user=to_user.id, user_id=current_user.id, unread=False, as_draft=True, shared_id=shared_id) flash(_("Message saved."), "success") return redirect(url_for("message.drafts")) if "send_message" in request.form and form.validate(): to_user = User.query.filter_by(username=form.to_user.data).first() # this is the shared id between 
conversations because the messages # are saved on both ends shared_id = uuid.uuid4() # Save the message in the current users inbox form.save(from_user=current_user.id, to_user=to_user.id, user_id=current_user.id, unread=False, shared_id=shared_id) # Save the message in the recievers inbox form.save(from_user=current_user.id, to_user=to_user.id, user_id=to_user.id, unread=True, shared_id=shared_id) to_user.invalidate_cache(permissions=False) flash(_("Message sent."), "success") return redirect(url_for("message.sent")) else: form.to_user.data = to_user return render_template("message/message_form.html", form=form, title=_("Compose Message")) @message.route("/message/<int:message_id>/raw") @login_required def raw_message(message_id): message = Message.query.filter_by(id=message_id).first_or_404() # abort if the message was not the current_user's one or the one of the # recieved ones if not (message.conversation.from_user_id == current_user.id or message.conversation.to_user_id == current_user.id): abort(404) return format_quote(username=message.user.username, content=message.message) @message.route("/<int:conversation_id>/edit", methods=["POST", "GET"]) @login_required def edit_conversation(conversation_id): conversation = Conversation.query.filter_by( id=conversation_id, user_id=current_user.id ).first_or_404() if not conversation.draft: flash(_("You cannot edit a sent message."), "danger") return redirect(url_for("message.inbox")) form = ConversationForm() if request.method == "POST": if "save_message" in request.form: to_user = User.query.filter_by(username=form.to_user.data).first() conversation.draft = True conversation.to_user_id = to_user.id conversation.first_message.message = form.message.data conversation.save() flash(_("Message saved."), "success") return redirect(url_for("message.drafts")) if "send_message" in request.form and form.validate(): to_user = User.query.filter_by(username=form.to_user.data).first() # Save the message in the recievers inbox 
form.save(from_user=current_user.id, to_user=to_user.id, user_id=to_user.id, unread=True, shared_id=conversation.shared_id) # Move the message from ``Drafts`` to ``Sent``. conversation.draft = False conversation.to_user = to_user conversation.date_created = time_utcnow() conversation.save() flash(_("Message sent."), "success") return redirect(url_for("message.sent")) else: form.to_user.data = conversation.to_user.username form.subject.data = conversation.subject form.message.data = conversation.first_message.message return render_template("message/message_form.html", form=form, title=_("Edit Message")) @message.route("/<int:conversation_id>/move", methods=["POST"]) @login_required def move_conversation(conversation_id): conversation = Conversation.query.filter_by( id=conversation_id, user_id=current_user.id ).first_or_404() conversation.trash = True conversation.save() return redirect(url_for("message.inbox")) @message.route("/<int:conversation_id>/restore", methods=["POST"]) @login_required def restore_conversation(conversation_id): conversation = Conversation.query.filter_by( id=conversation_id, user_id=current_user.id ).first_or_404() conversation.trash = False conversation.save() return redirect(url_for("message.inbox")) @message.route("/<int:conversation_id>/delete", methods=["POST"]) @login_required def delete_conversation(conversation_id): conversation = Conversation.query.filter_by( id=conversation_id, user_id=current_user.id ).first_or_404() conversation.delete() return redirect(url_for("message.inbox")) @message.route("/sent") @login_required def sent(): page = request.args.get('page', 1, type=int) conversations = Conversation.query.\ filter( Conversation.id == Message.conversation_id, Conversation.user_id == current_user.id, Conversation.draft == False, Conversation.trash == False, db.not_(Conversation.to_user_id == current_user.id) ).\ order_by(Message.date_created.desc()).\ paginate(page, flaskbb_config['TOPICS_PER_PAGE'], False) message_count = 
Conversation.query.\ filter(Conversation.user_id == current_user.id).\ count() return render_template("message/sent.html", conversations=conversations, message_count=message_count) @message.route("/draft") @login_required def drafts(): page = request.args.get('page', 1, type=int) conversations = Conversation.query.\ filter( Conversation.id == Message.conversation_id, Conversation.user_id == current_user.id, Conversation.draft == True, Conversation.trash == False ).\ order_by(Message.date_created.desc()).\ paginate(page, flaskbb_config['TOPICS_PER_PAGE'], False) message_count = Conversation.query.\ filter(Conversation.user_id == current_user.id).\ count() return render_template("message/drafts.html", conversations=conversations, message_count=message_count) @message.route("/trash") @login_required def trash(): page = request.args.get('page', 1, type=int) conversations = Conversation.query.\ filter( Conversation.id == Message.conversation_id, Conversation.user_id == current_user.id, Conversation.trash == True, ).\ order_by(Message.date_created.desc()).\ paginate(page, flaskbb_config['TOPICS_PER_PAGE'], False) message_count = Conversation.query.\ filter(Conversation.user_id == current_user.id).\ count() return render_template("message/trash.html", conversations=conversations, message_count=message_count)
flaskbb/message/views.py
import uuid from flask import Blueprint, redirect, request, url_for, flash, abort from flask_login import login_required, current_user from flask_babelplus import gettext as _ from flaskbb.extensions import db from flaskbb.utils.settings import flaskbb_config from flaskbb.utils.helpers import render_template, format_quote, time_utcnow from flaskbb.message.forms import ConversationForm, MessageForm from flaskbb.message.models import Conversation, Message from flaskbb.user.models import User message = Blueprint("message", __name__) @message.route("/") @message.route("/inbox") @login_required def inbox(): page = request.args.get('page', 1, type=int) conversations = Conversation.query.\ filter( Conversation.id == Message.conversation_id, Conversation.user_id == current_user.id, Conversation.draft == False, Conversation.trash == False ).\ order_by(Message.date_created.desc()).\ paginate(page, flaskbb_config['TOPICS_PER_PAGE'], False) message_count = Conversation.query.\ filter(Conversation.user_id == current_user.id).\ count() return render_template("message/inbox.html", conversations=conversations, message_count=message_count) @message.route("/<int:conversation_id>/view", methods=["GET", "POST"]) def view_conversation(conversation_id): conversation = Conversation.query.filter_by( id=conversation_id, user_id=current_user.id ).first_or_404() if conversation.unread: conversation.unread = False current_user.invalidate_cache(permissions=False) conversation.save() form = MessageForm() if form.validate_on_submit(): message_count = Conversation.query.\ filter(Conversation.user_id == current_user.id).\ count() if message_count >= flaskbb_config["MESSAGE_QUOTA"]: flash(_("You cannot send any messages anymore because you have " "reached your message limit."), "danger") return redirect(url_for("message.view_conversation", conversation_id=conversation.id)) to_user_id = None # If the current_user is the user who recieved the message # then we have to change the id's a bit. 
if current_user.id == conversation.to_user_id: to_user_id = conversation.from_user_id else: to_user_id = conversation.to_user_id form.save(conversation=conversation, user_id=current_user.id) # save the message in the recievers conversation old_conv = conversation conversation = Conversation.query.\ filter( Conversation.user_id == to_user_id, Conversation.shared_id == conversation.shared_id ).first() # user deleted the conversation, start a new conversation with just # the recieving message if conversation is None: conversation = Conversation( subject=old_conv.subject, from_user_id=current_user.id, to_user=to_user_id, user_id=to_user_id, shared_id=old_conv.shared_id ) conversation.save() form.save(conversation=conversation, user_id=current_user.id, unread=True) conversation.to_user.invalidate_cache(permissions=False) return redirect(url_for("message.view_conversation", conversation_id=old_conv.id)) return render_template("message/conversation.html", conversation=conversation, form=form) @message.route("/new", methods=["POST", "GET"]) @login_required def new_conversation(): form = ConversationForm() to_user = request.args.get("to_user") message_count = Conversation.query.\ filter(Conversation.user_id == current_user.id).\ count() if message_count >= flaskbb_config["MESSAGE_QUOTA"]: flash(_("You cannot send any messages anymore because you have " "reached your message limit."), "danger") return redirect(url_for("message.inbox")) if request.method == "POST": if "save_message" in request.form and form.validate(): to_user = User.query.filter_by(username=form.to_user.data).first() shared_id = uuid.uuid4() form.save(from_user=current_user.id, to_user=to_user.id, user_id=current_user.id, unread=False, as_draft=True, shared_id=shared_id) flash(_("Message saved."), "success") return redirect(url_for("message.drafts")) if "send_message" in request.form and form.validate(): to_user = User.query.filter_by(username=form.to_user.data).first() # this is the shared id between 
conversations because the messages # are saved on both ends shared_id = uuid.uuid4() # Save the message in the current users inbox form.save(from_user=current_user.id, to_user=to_user.id, user_id=current_user.id, unread=False, shared_id=shared_id) # Save the message in the recievers inbox form.save(from_user=current_user.id, to_user=to_user.id, user_id=to_user.id, unread=True, shared_id=shared_id) to_user.invalidate_cache(permissions=False) flash(_("Message sent."), "success") return redirect(url_for("message.sent")) else: form.to_user.data = to_user return render_template("message/message_form.html", form=form, title=_("Compose Message")) @message.route("/message/<int:message_id>/raw") @login_required def raw_message(message_id): message = Message.query.filter_by(id=message_id).first_or_404() # abort if the message was not the current_user's one or the one of the # recieved ones if not (message.conversation.from_user_id == current_user.id or message.conversation.to_user_id == current_user.id): abort(404) return format_quote(username=message.user.username, content=message.message) @message.route("/<int:conversation_id>/edit", methods=["POST", "GET"]) @login_required def edit_conversation(conversation_id): conversation = Conversation.query.filter_by( id=conversation_id, user_id=current_user.id ).first_or_404() if not conversation.draft: flash(_("You cannot edit a sent message."), "danger") return redirect(url_for("message.inbox")) form = ConversationForm() if request.method == "POST": if "save_message" in request.form: to_user = User.query.filter_by(username=form.to_user.data).first() conversation.draft = True conversation.to_user_id = to_user.id conversation.first_message.message = form.message.data conversation.save() flash(_("Message saved."), "success") return redirect(url_for("message.drafts")) if "send_message" in request.form and form.validate(): to_user = User.query.filter_by(username=form.to_user.data).first() # Save the message in the recievers inbox 
form.save(from_user=current_user.id, to_user=to_user.id, user_id=to_user.id, unread=True, shared_id=conversation.shared_id) # Move the message from ``Drafts`` to ``Sent``. conversation.draft = False conversation.to_user = to_user conversation.date_created = time_utcnow() conversation.save() flash(_("Message sent."), "success") return redirect(url_for("message.sent")) else: form.to_user.data = conversation.to_user.username form.subject.data = conversation.subject form.message.data = conversation.first_message.message return render_template("message/message_form.html", form=form, title=_("Edit Message")) @message.route("/<int:conversation_id>/move", methods=["POST"]) @login_required def move_conversation(conversation_id): conversation = Conversation.query.filter_by( id=conversation_id, user_id=current_user.id ).first_or_404() conversation.trash = True conversation.save() return redirect(url_for("message.inbox")) @message.route("/<int:conversation_id>/restore", methods=["POST"]) @login_required def restore_conversation(conversation_id): conversation = Conversation.query.filter_by( id=conversation_id, user_id=current_user.id ).first_or_404() conversation.trash = False conversation.save() return redirect(url_for("message.inbox")) @message.route("/<int:conversation_id>/delete", methods=["POST"]) @login_required def delete_conversation(conversation_id): conversation = Conversation.query.filter_by( id=conversation_id, user_id=current_user.id ).first_or_404() conversation.delete() return redirect(url_for("message.inbox")) @message.route("/sent") @login_required def sent(): page = request.args.get('page', 1, type=int) conversations = Conversation.query.\ filter( Conversation.id == Message.conversation_id, Conversation.user_id == current_user.id, Conversation.draft == False, Conversation.trash == False, db.not_(Conversation.to_user_id == current_user.id) ).\ order_by(Message.date_created.desc()).\ paginate(page, flaskbb_config['TOPICS_PER_PAGE'], False) message_count = 
Conversation.query.\ filter(Conversation.user_id == current_user.id).\ count() return render_template("message/sent.html", conversations=conversations, message_count=message_count) @message.route("/draft") @login_required def drafts(): page = request.args.get('page', 1, type=int) conversations = Conversation.query.\ filter( Conversation.id == Message.conversation_id, Conversation.user_id == current_user.id, Conversation.draft == True, Conversation.trash == False ).\ order_by(Message.date_created.desc()).\ paginate(page, flaskbb_config['TOPICS_PER_PAGE'], False) message_count = Conversation.query.\ filter(Conversation.user_id == current_user.id).\ count() return render_template("message/drafts.html", conversations=conversations, message_count=message_count) @message.route("/trash") @login_required def trash(): page = request.args.get('page', 1, type=int) conversations = Conversation.query.\ filter( Conversation.id == Message.conversation_id, Conversation.user_id == current_user.id, Conversation.trash == True, ).\ order_by(Message.date_created.desc()).\ paginate(page, flaskbb_config['TOPICS_PER_PAGE'], False) message_count = Conversation.query.\ filter(Conversation.user_id == current_user.id).\ count() return render_template("message/trash.html", conversations=conversations, message_count=message_count)
0.361954
0.0566
import os import json from typing import Optional import torch import numpy as np from tqdm import tqdm from sklearn import metrics from persia.ctx import TrainCtx, eval_ctx from persia.distributed import DDPOption from persia.embedding.optim import Adagrad from persia.embedding.data import PersiaBatch from persia.env import get_rank, get_local_rank, get_world_size from persia.logger import get_default_logger from persia.utils import setup_seed from persia.data import Dataloder, PersiaDataset, StreamingDataset from persia.prelude import PersiaBatchDataSender from model import DNN from data_generator import make_dataloader logger = get_default_logger("nn_worker") setup_seed(3) CPU_TEST_AUC = 0.8936692224423999 GPU_TEST_AUC = 0.8934601372796367 class TestDataset(PersiaDataset): def __init__(self, test_dir: str, batch_size: int = 128): super(TestDataset, self).__init__(buffer_size=10) self.loader = make_dataloader(test_dir, batch_size) logger.info(f"test dataset size is {len(self.loader)}") def fetch_data(self, persia_sender_channel: PersiaBatchDataSender): logger.info("test loader start to generating data...") for _idx, (non_id_type_feature, id_type_features, label) in enumerate( tqdm(self.loader, desc="generating data") ): persia_batch = PersiaBatch( id_type_features, non_id_type_features=[non_id_type_feature], labels=[label], requires_grad=False, ) persia_sender_channel.send(persia_batch.data) def __len__(self): return len(self.loader) def test( model: torch.nn.Module, clear_embeddings: bool = False, checkpoint_dir: Optional[str] = None, cuda: bool = True, ): logger.info("start to test...") model.eval() test_dir = os.path.join("/data/test.npz") test_dataset = TestDataset(test_dir, batch_size=128) with eval_ctx(model=model) as ctx: test_loader = Dataloder(test_dataset, is_training=False) if checkpoint_dir is not None: logger.info(f"loading checkpoint {checkpoint_dir}") ctx.load_checkpoint(checkpoint_dir) accuracies, losses = [], [] all_pred, all_target = [], [] for 
(_batch_idx, batch_data) in enumerate(tqdm(test_loader, desc="test...")): (pred, targets) = ctx.forward(batch_data) target = targets[0] loss = loss_fn(pred, target) if cuda: pred = pred.cpu() target = target.cpu() else: # cpu mode need copy the target data to avoid use the expired data. target = target.clone() all_pred.append(pred.detach().numpy()) all_target.append(target.detach().numpy()) accuracy = (torch.round(pred) == target).sum() / target.shape[0] accuracies.append(accuracy) losses.append(float(loss)) if clear_embeddings: ctx.clear_embeddings() num_ids = sum(ctx.get_embedding_size()) assert num_ids == 0, f"clear embedding failed" all_pred, all_target = np.concatenate(all_pred), np.concatenate(all_target) fpr, tpr, th = metrics.roc_curve(all_target, all_pred) test_auc = metrics.auc(fpr, tpr) test_accuracy = torch.mean(torch.tensor(accuracies)) test_loss = torch.mean(torch.tensor(losses)) logger.info( f"test auc is {test_auc} accuracy is {test_accuracy}, loss is {test_loss}" ) model.train() return test_auc, test_accuracy if __name__ == "__main__": model = DNN() logger.info("init Simple DNN model...") rank, device_id, world_size = get_rank(), get_local_rank(), get_world_size() mixed_precision = True if torch.cuda.is_available(): torch.cuda.set_device(device_id) model.cuda(device_id) backend = "nccl" cuda = True else: mixed_precision = False device_id = None backend = "gloo" cuda = False dense_optimizer = torch.optim.SGD(model.parameters(), lr=0.1) embedding_optimizer = Adagrad(lr=1e-2) loss_fn = torch.nn.BCELoss(reduction="mean") eval_checkpoint_dir = os.environ["EVAL_CHECKPOINT_DIR"] infer_checkpoint_dir = os.environ["INFER_CHECKPOINT_DIR"] hdfs_checkpoint_dir = os.environ["HDFS_CHECKPOINT_DIR"] test_interval = 254 buffer_size = 10 with TrainCtx( model=model, embedding_optimizer=embedding_optimizer, dense_optimizer=dense_optimizer, device_id=device_id, mixed_precision=mixed_precision, distributed_option=DDPOption(backend=backend), ) as ctx: train_dataloader = 
Dataloder( StreamingDataset(buffer_size), reproducible=True, embedding_staleness=1 ) for (batch_idx, data) in enumerate(train_dataloader): (output, targets) = ctx.forward(data) target = targets[0] loss = loss_fn(output, target) scaled_loss = ctx.backward(loss) accuracy = (torch.round(output) == target).sum() / target.shape[0] logger.info( f"current idx: {batch_idx} loss: {loss} scaled_loss: {scaled_loss} accuracy: {accuracy}" ) if batch_idx % test_interval == 0 and batch_idx != 0: test_auc, test_acc = test(model, cuda=cuda) np.testing.assert_equal( np.array([test_auc]), np.array([GPU_TEST_AUC if cuda else CPU_TEST_AUC]), ) break ctx.dump_checkpoint(eval_checkpoint_dir) logger.info(f"dump checkpoint to {eval_checkpoint_dir}") ctx.dump_checkpoint(hdfs_checkpoint_dir) logger.info(f"dump checkpoint to {hdfs_checkpoint_dir}") ctx.dump_checkpoint(infer_checkpoint_dir, with_jit_model=True) logger.info(f"dump checkpoint to {infer_checkpoint_dir}") ctx.clear_embeddings() num_ids = sum(ctx.get_embedding_size()) assert num_ids == 0, f"clear embedding failed" eval_auc, eval_acc = test( model, clear_embeddings=True, checkpoint_dir=eval_checkpoint_dir, cuda=cuda ) np.testing.assert_equal(np.array([test_auc]), np.array([eval_auc])) eval_auc, eval_acc = test( model, clear_embeddings=True, checkpoint_dir=hdfs_checkpoint_dir, cuda=cuda ) np.testing.assert_equal(np.array([test_auc]), np.array([eval_auc])) result_filepath = os.environ["RESULT_FILE_PATH"] result = { "test_auc": test_auc, "eval_auc": eval_auc, } result = json.dumps(result) with open(result_filepath, "w") as f: f.write(result)
e2e/adult_income/train.py
import os import json from typing import Optional import torch import numpy as np from tqdm import tqdm from sklearn import metrics from persia.ctx import TrainCtx, eval_ctx from persia.distributed import DDPOption from persia.embedding.optim import Adagrad from persia.embedding.data import PersiaBatch from persia.env import get_rank, get_local_rank, get_world_size from persia.logger import get_default_logger from persia.utils import setup_seed from persia.data import Dataloder, PersiaDataset, StreamingDataset from persia.prelude import PersiaBatchDataSender from model import DNN from data_generator import make_dataloader logger = get_default_logger("nn_worker") setup_seed(3) CPU_TEST_AUC = 0.8936692224423999 GPU_TEST_AUC = 0.8934601372796367 class TestDataset(PersiaDataset): def __init__(self, test_dir: str, batch_size: int = 128): super(TestDataset, self).__init__(buffer_size=10) self.loader = make_dataloader(test_dir, batch_size) logger.info(f"test dataset size is {len(self.loader)}") def fetch_data(self, persia_sender_channel: PersiaBatchDataSender): logger.info("test loader start to generating data...") for _idx, (non_id_type_feature, id_type_features, label) in enumerate( tqdm(self.loader, desc="generating data") ): persia_batch = PersiaBatch( id_type_features, non_id_type_features=[non_id_type_feature], labels=[label], requires_grad=False, ) persia_sender_channel.send(persia_batch.data) def __len__(self): return len(self.loader) def test( model: torch.nn.Module, clear_embeddings: bool = False, checkpoint_dir: Optional[str] = None, cuda: bool = True, ): logger.info("start to test...") model.eval() test_dir = os.path.join("/data/test.npz") test_dataset = TestDataset(test_dir, batch_size=128) with eval_ctx(model=model) as ctx: test_loader = Dataloder(test_dataset, is_training=False) if checkpoint_dir is not None: logger.info(f"loading checkpoint {checkpoint_dir}") ctx.load_checkpoint(checkpoint_dir) accuracies, losses = [], [] all_pred, all_target = [], [] for 
(_batch_idx, batch_data) in enumerate(tqdm(test_loader, desc="test...")): (pred, targets) = ctx.forward(batch_data) target = targets[0] loss = loss_fn(pred, target) if cuda: pred = pred.cpu() target = target.cpu() else: # cpu mode need copy the target data to avoid use the expired data. target = target.clone() all_pred.append(pred.detach().numpy()) all_target.append(target.detach().numpy()) accuracy = (torch.round(pred) == target).sum() / target.shape[0] accuracies.append(accuracy) losses.append(float(loss)) if clear_embeddings: ctx.clear_embeddings() num_ids = sum(ctx.get_embedding_size()) assert num_ids == 0, f"clear embedding failed" all_pred, all_target = np.concatenate(all_pred), np.concatenate(all_target) fpr, tpr, th = metrics.roc_curve(all_target, all_pred) test_auc = metrics.auc(fpr, tpr) test_accuracy = torch.mean(torch.tensor(accuracies)) test_loss = torch.mean(torch.tensor(losses)) logger.info( f"test auc is {test_auc} accuracy is {test_accuracy}, loss is {test_loss}" ) model.train() return test_auc, test_accuracy if __name__ == "__main__": model = DNN() logger.info("init Simple DNN model...") rank, device_id, world_size = get_rank(), get_local_rank(), get_world_size() mixed_precision = True if torch.cuda.is_available(): torch.cuda.set_device(device_id) model.cuda(device_id) backend = "nccl" cuda = True else: mixed_precision = False device_id = None backend = "gloo" cuda = False dense_optimizer = torch.optim.SGD(model.parameters(), lr=0.1) embedding_optimizer = Adagrad(lr=1e-2) loss_fn = torch.nn.BCELoss(reduction="mean") eval_checkpoint_dir = os.environ["EVAL_CHECKPOINT_DIR"] infer_checkpoint_dir = os.environ["INFER_CHECKPOINT_DIR"] hdfs_checkpoint_dir = os.environ["HDFS_CHECKPOINT_DIR"] test_interval = 254 buffer_size = 10 with TrainCtx( model=model, embedding_optimizer=embedding_optimizer, dense_optimizer=dense_optimizer, device_id=device_id, mixed_precision=mixed_precision, distributed_option=DDPOption(backend=backend), ) as ctx: train_dataloader = 
Dataloder( StreamingDataset(buffer_size), reproducible=True, embedding_staleness=1 ) for (batch_idx, data) in enumerate(train_dataloader): (output, targets) = ctx.forward(data) target = targets[0] loss = loss_fn(output, target) scaled_loss = ctx.backward(loss) accuracy = (torch.round(output) == target).sum() / target.shape[0] logger.info( f"current idx: {batch_idx} loss: {loss} scaled_loss: {scaled_loss} accuracy: {accuracy}" ) if batch_idx % test_interval == 0 and batch_idx != 0: test_auc, test_acc = test(model, cuda=cuda) np.testing.assert_equal( np.array([test_auc]), np.array([GPU_TEST_AUC if cuda else CPU_TEST_AUC]), ) break ctx.dump_checkpoint(eval_checkpoint_dir) logger.info(f"dump checkpoint to {eval_checkpoint_dir}") ctx.dump_checkpoint(hdfs_checkpoint_dir) logger.info(f"dump checkpoint to {hdfs_checkpoint_dir}") ctx.dump_checkpoint(infer_checkpoint_dir, with_jit_model=True) logger.info(f"dump checkpoint to {infer_checkpoint_dir}") ctx.clear_embeddings() num_ids = sum(ctx.get_embedding_size()) assert num_ids == 0, f"clear embedding failed" eval_auc, eval_acc = test( model, clear_embeddings=True, checkpoint_dir=eval_checkpoint_dir, cuda=cuda ) np.testing.assert_equal(np.array([test_auc]), np.array([eval_auc])) eval_auc, eval_acc = test( model, clear_embeddings=True, checkpoint_dir=hdfs_checkpoint_dir, cuda=cuda ) np.testing.assert_equal(np.array([test_auc]), np.array([eval_auc])) result_filepath = os.environ["RESULT_FILE_PATH"] result = { "test_auc": test_auc, "eval_auc": eval_auc, } result = json.dumps(result) with open(result_filepath, "w") as f: f.write(result)
0.883601
0.310054
from typing import Dict, List, Tuple from enum import Enum import bpy from bpy.props import ( FloatProperty, IntVectorProperty, FloatVectorProperty, BoolProperty, CollectionProperty, EnumProperty, IntProperty, StringProperty) from .operator_func.common import MeshType, AnimationLoopType from .common_data import MCBLEND_JustName # Animation properties class EffectTypes(Enum): ''' EffectTypes types of the effects in the event. ''' SOUND_EFFECT='Sound Effect' PARTICLE_EFFECT='Particle Effect' def list_effect_types_as_blender_enum(self, context): ''' List effect types for EnumProperty. ''' # pylint: disable=unused-argument return [(i.value, i.value, i.value) for i in EffectTypes] class MCBLEND_EffectProperties(bpy.types.PropertyGroup): ''' An effect of an event (sound or particles) ''' effect_type: EnumProperty( # type: ignore items=list_effect_types_as_blender_enum, name='Effect type') effect: StringProperty( # type: ignore name="Effect", description='The identifier of the sound effect.', default='', maxlen=1024) locator: StringProperty( # type: ignore name="Locator", description='The identifier of the locator effect.', default='', maxlen=1024) pre_effect_script: StringProperty( # type: ignore name="Locator", description='A Molang script that will be run when the particle emitter is initialized.', default='', maxlen=2048) bind_to_actor: BoolProperty( # type: ignore name="Bind to actor", description="Whether the should be spawned in the world without being bound to an actor.", default=True) def get_unused_event_name(base_name: str, i=1): ''' Gets the name of event which is not used by any other event in the animation. Uses the base name and adds number at the end of it to find unique name with pattern :code:`{base_name}.{number:04}`. This function assumes there is an active event and active animation. It will throw errors without asserting these conditions. 
''' events = bpy.context.scene.mcblend_events name = base_name while name in events.keys(): name = f'{base_name}.{i:04}' i += 1 return name def _update_event_name(event, new_name: str): event['name'] = new_name def _set_event_name(self, value): events = bpy.context.scene.mcblend_events # Empty name is no allowed if value == '': return # If name already in use rename the other uv group for other_event in events: if ( # Change the of the duplicate if there is one other_event.path_from_id() != self.path_from_id() and other_event.name == value): # Get starting name index i = 1 base_name = value split_name = value.split('.') try: prev_i = int(split_name[-1]) i = i if prev_i <= 0 else prev_i base_name = '.'.join(split_name[:-1]) except ValueError: pass other_new_name = get_unused_event_name(base_name, i) _update_event_name(other_event, other_new_name) break _update_event_name(self, value) def _get_event_name(self): if 'name' not in self: return '' return self['name'] class MCBLEND_EventProperties(bpy.types.PropertyGroup): ''' A collection of sound and particle events. ''' name: StringProperty( # type: ignore name="Name", description=( "The name of the of the event. Also used to identify timeline " "markers that trigger this event."), # The Add operator overwrites default value on creation to trigger the # update function default='', maxlen=1024, set=_set_event_name, get=_get_event_name) effects: CollectionProperty( # type: ignore type=MCBLEND_EffectProperties, description='Collection of effects triggered of this event.', name='Sound effects') def get_effects_dict(self) -> Tuple[List[Dict], List[Dict]]: ''' Returns tuple of two lists (sound effects, particle effects). 
''' sound_effects: List[Dict] = [] particle_effects: List[Dict] = [] for effect in self.effects: if effect.effect_type == EffectTypes.PARTICLE_EFFECT.value: result = {"effect": effect.effect} if effect.locator != '': result["locator"] = effect.locator if effect.pre_effect_script != '': result["pre_effect_script"] = ( effect.pre_effect_script) if not effect.bind_to_actor: result["bind_to_actor"] = effect.bind_to_actor particle_effects.append(result) elif effect.effect_type == EffectTypes.SOUND_EFFECT.value: sound_effects.append({"effect": effect.effect}) else: raise ValueError('Unknown effect type.') return sound_effects, particle_effects class MCBLEND_TimelineMarkerProperties(bpy.types.PropertyGroup): '''Saves the data about a timeline marker.''' name: StringProperty( # type: ignore name="Name", description="Name of the timeline marker.", default="marker", maxlen=1024 ) frame: IntProperty( # type: ignore name="Frame", description="The frame of the timeline marker.", default=0 ) class MCBLEND_AnimationProperties(bpy.types.PropertyGroup): '''Properties of an animation template.''' name: StringProperty( # type: ignore name="Name", description="Name of the animation.", default="animation", maxlen=1024 ) world_origin: StringProperty( # type: ignore name="World Origin Object", description="Name of the object to be used as the world origin for the animation.", default="", maxlen=1024 ) single_frame: BoolProperty( # type: ignore name="Single frame", description="Exports current pose as single frame animation", default=False, ) skip_rest_poses: BoolProperty( # type: ignore name="Skip rest poses", description=( "Whether bone transformations that represent a rest position " "throughout the whole animation should be ignored."), default=False, ) override_previous_animation: BoolProperty( # type: ignore name="Override previos animation", description=( "Sets the override_previous_animation property of the animation"), default=False, ) anim_time_update: StringProperty( # type: 
ignore name="anim_time_update", description="Adds anim_time_update value unless is left empty", default="", maxlen=1024 ) loop: EnumProperty( # type: ignore items=( ( AnimationLoopType.TRUE.value, AnimationLoopType.TRUE.value, 'The animation is looped' ), ( AnimationLoopType.FALSE.value, AnimationLoopType.FALSE.value, 'The animation has no effect on entity after it finished' ), ( AnimationLoopType.HOLD_ON_LAST_FRAME.value, AnimationLoopType.HOLD_ON_LAST_FRAME.value, 'After the end of animation the entity stays in the pose from ' 'the last frame' ) ), name='Loop') frame_start: IntProperty( # type: ignore name="Frame start", description="The first frame of the animation.", default=0, min=0 ) frame_current: IntProperty( # type: ignore name="Frame current", description="The current frame of the animation.", default=100, min=0 ) frame_end: IntProperty( # type: ignore name="Frame end", description="The last frame of the animation.", default=100, min=0 ) timeline_markers: CollectionProperty( # type: ignore type=MCBLEND_TimelineMarkerProperties, name='Timeline Markers', description='Timeline markers related to this animation.' ) nla_tracks: CollectionProperty( # type: ignore type=MCBLEND_JustName ) # Material properties def list_mesh_types_as_blender_enum(self, context): '''List mesh types for EnumProperty.''' # pylint: disable=unused-argument return [(i.value, i.value, i.value) for i in MeshType] class MCBLEND_FakeRcMaterialProperties(bpy.types.PropertyGroup): ''' Pattern-material pair for MCBLEND_FakeRcProperties object. ''' pattern: StringProperty( # type: ignore name="", description="The bone name pattern for assigning material.", default="", maxlen=1024) material: StringProperty( # type: ignore name="", description="Name of the material used by this render controller", default="", maxlen=1024 ) class MCBLEND_FakeRcProperties(bpy.types.PropertyGroup): ''' Armature property group similar to Minecraft render controller used for generating Minecraft materials. 
''' texture: StringProperty( # type: ignore name="", description="Name of the texture used by this render controller", default="", maxlen=1024 ) materials: CollectionProperty( # type: ignore type=MCBLEND_FakeRcMaterialProperties, name='Materials') class MCBLEND_ObjectProperties(bpy.types.PropertyGroup): '''Custom properties of an object.''' # ARMATURE PROPERTIES (equivalent of minecraft model) model_name: StringProperty( # type: ignore name="", description="Name of the model", default="model", maxlen=1024 ) texture_template_resolution: IntProperty( # type: ignore name="Template texture resolution", description=( 'Sets the resolution of the template texture.' 'describes how many pixels on the image is represented by one ' 'texture_width or texture_height unit in model definition. ' 'The value of 1 gives the standard minecraft texture ' 'resolution.' ), default=1, min=1, soft_max=5, ) allow_expanding: BoolProperty( # type: ignore name="Allow Texture Expanding", description="Allows expanding texture during texture generation.", default=True, ) generate_texture: BoolProperty( # type: ignore name="Generate texture", description="Generates texture during UV mapping.", default=True, ) visible_bounds_offset: FloatVectorProperty( # type: ignore name="Visible bounds offset", description="visible_bounds_offset of the model", default=(0.0, 0.0, 0.0) ) visible_bounds_width: FloatProperty( # type: ignore name="Visible bounds width", description="visible_bounds_width of the model", default=1.0 ) visible_bounds_height: FloatProperty( # type: ignore name="Visible bounds height", description="visible_bounds_height of the model", default=1.0 ) texture_width: IntProperty( # type: ignore name="", description="Minecraft UV parameter width.", default=64, min=1 ) texture_height: IntProperty( # type: ignore name="", description=( "Minecraft UV parameter height. If you set it to 0 than the height" " of the texture will be picked automatically for you." 
), default=64, min=1 ) # RENDER CONTROLLERS (armature properties used for generating materials) render_controllers: CollectionProperty( # type: ignore type=MCBLEND_FakeRcProperties, name="Render Controllers" ) # ANIMATIONS # Animation properties active_animation: IntProperty(default=0) # type: ignore animations: CollectionProperty( # type: ignore type=MCBLEND_AnimationProperties) # CUBE PROPERTIES mirror: BoolProperty( # type: ignore name="Mirror", description="Defines how to layout the UV during UV generation.", default=False, ) uv_group: StringProperty( # type: ignore name="UV group", description=( "Objects with the same UV group can be mapped to the same spot on " "the texture if they have the same dimensions. Empty string means " "that the object doesn't belong to any UV group."), default="", maxlen=1024 ) inflate: FloatProperty( # type: ignore name="Inflate", description="The inflate value of this object.", default=0.0 ) mesh_type: EnumProperty( # type: ignore items=list_mesh_types_as_blender_enum, # type: ignore name='Mesh type') min_uv_size: IntVectorProperty( # type: ignore name="Min UV size", default=(0.0, 0.0, 0.0), min=0, description=( "The lower UV boundary of the length of X dimension of a cube. If " "it's greater than the actual X, then the UV-mapper will act as " "if the X were equal to this value.") ) class MCBLEND_BoneProperties(bpy.types.PropertyGroup): ''' Custom properties of a bone ''' binding: StringProperty( # type: ignore name="Binding", description="The equivalent of Minecraft binding property", default="", maxlen=1024 )
mcblend/object_data.py
from typing import Dict, List, Tuple from enum import Enum import bpy from bpy.props import ( FloatProperty, IntVectorProperty, FloatVectorProperty, BoolProperty, CollectionProperty, EnumProperty, IntProperty, StringProperty) from .operator_func.common import MeshType, AnimationLoopType from .common_data import MCBLEND_JustName # Animation properties class EffectTypes(Enum): ''' EffectTypes types of the effects in the event. ''' SOUND_EFFECT='Sound Effect' PARTICLE_EFFECT='Particle Effect' def list_effect_types_as_blender_enum(self, context): ''' List effect types for EnumProperty. ''' # pylint: disable=unused-argument return [(i.value, i.value, i.value) for i in EffectTypes] class MCBLEND_EffectProperties(bpy.types.PropertyGroup): ''' An effect of an event (sound or particles) ''' effect_type: EnumProperty( # type: ignore items=list_effect_types_as_blender_enum, name='Effect type') effect: StringProperty( # type: ignore name="Effect", description='The identifier of the sound effect.', default='', maxlen=1024) locator: StringProperty( # type: ignore name="Locator", description='The identifier of the locator effect.', default='', maxlen=1024) pre_effect_script: StringProperty( # type: ignore name="Locator", description='A Molang script that will be run when the particle emitter is initialized.', default='', maxlen=2048) bind_to_actor: BoolProperty( # type: ignore name="Bind to actor", description="Whether the should be spawned in the world without being bound to an actor.", default=True) def get_unused_event_name(base_name: str, i=1): ''' Gets the name of event which is not used by any other event in the animation. Uses the base name and adds number at the end of it to find unique name with pattern :code:`{base_name}.{number:04}`. This function assumes there is an active event and active animation. It will throw errors without asserting these conditions. 
''' events = bpy.context.scene.mcblend_events name = base_name while name in events.keys(): name = f'{base_name}.{i:04}' i += 1 return name def _update_event_name(event, new_name: str): event['name'] = new_name def _set_event_name(self, value): events = bpy.context.scene.mcblend_events # Empty name is no allowed if value == '': return # If name already in use rename the other uv group for other_event in events: if ( # Change the of the duplicate if there is one other_event.path_from_id() != self.path_from_id() and other_event.name == value): # Get starting name index i = 1 base_name = value split_name = value.split('.') try: prev_i = int(split_name[-1]) i = i if prev_i <= 0 else prev_i base_name = '.'.join(split_name[:-1]) except ValueError: pass other_new_name = get_unused_event_name(base_name, i) _update_event_name(other_event, other_new_name) break _update_event_name(self, value) def _get_event_name(self): if 'name' not in self: return '' return self['name'] class MCBLEND_EventProperties(bpy.types.PropertyGroup): ''' A collection of sound and particle events. ''' name: StringProperty( # type: ignore name="Name", description=( "The name of the of the event. Also used to identify timeline " "markers that trigger this event."), # The Add operator overwrites default value on creation to trigger the # update function default='', maxlen=1024, set=_set_event_name, get=_get_event_name) effects: CollectionProperty( # type: ignore type=MCBLEND_EffectProperties, description='Collection of effects triggered of this event.', name='Sound effects') def get_effects_dict(self) -> Tuple[List[Dict], List[Dict]]: ''' Returns tuple of two lists (sound effects, particle effects). 
''' sound_effects: List[Dict] = [] particle_effects: List[Dict] = [] for effect in self.effects: if effect.effect_type == EffectTypes.PARTICLE_EFFECT.value: result = {"effect": effect.effect} if effect.locator != '': result["locator"] = effect.locator if effect.pre_effect_script != '': result["pre_effect_script"] = ( effect.pre_effect_script) if not effect.bind_to_actor: result["bind_to_actor"] = effect.bind_to_actor particle_effects.append(result) elif effect.effect_type == EffectTypes.SOUND_EFFECT.value: sound_effects.append({"effect": effect.effect}) else: raise ValueError('Unknown effect type.') return sound_effects, particle_effects class MCBLEND_TimelineMarkerProperties(bpy.types.PropertyGroup): '''Saves the data about a timeline marker.''' name: StringProperty( # type: ignore name="Name", description="Name of the timeline marker.", default="marker", maxlen=1024 ) frame: IntProperty( # type: ignore name="Frame", description="The frame of the timeline marker.", default=0 ) class MCBLEND_AnimationProperties(bpy.types.PropertyGroup): '''Properties of an animation template.''' name: StringProperty( # type: ignore name="Name", description="Name of the animation.", default="animation", maxlen=1024 ) world_origin: StringProperty( # type: ignore name="World Origin Object", description="Name of the object to be used as the world origin for the animation.", default="", maxlen=1024 ) single_frame: BoolProperty( # type: ignore name="Single frame", description="Exports current pose as single frame animation", default=False, ) skip_rest_poses: BoolProperty( # type: ignore name="Skip rest poses", description=( "Whether bone transformations that represent a rest position " "throughout the whole animation should be ignored."), default=False, ) override_previous_animation: BoolProperty( # type: ignore name="Override previos animation", description=( "Sets the override_previous_animation property of the animation"), default=False, ) anim_time_update: StringProperty( # type: 
ignore name="anim_time_update", description="Adds anim_time_update value unless is left empty", default="", maxlen=1024 ) loop: EnumProperty( # type: ignore items=( ( AnimationLoopType.TRUE.value, AnimationLoopType.TRUE.value, 'The animation is looped' ), ( AnimationLoopType.FALSE.value, AnimationLoopType.FALSE.value, 'The animation has no effect on entity after it finished' ), ( AnimationLoopType.HOLD_ON_LAST_FRAME.value, AnimationLoopType.HOLD_ON_LAST_FRAME.value, 'After the end of animation the entity stays in the pose from ' 'the last frame' ) ), name='Loop') frame_start: IntProperty( # type: ignore name="Frame start", description="The first frame of the animation.", default=0, min=0 ) frame_current: IntProperty( # type: ignore name="Frame current", description="The current frame of the animation.", default=100, min=0 ) frame_end: IntProperty( # type: ignore name="Frame end", description="The last frame of the animation.", default=100, min=0 ) timeline_markers: CollectionProperty( # type: ignore type=MCBLEND_TimelineMarkerProperties, name='Timeline Markers', description='Timeline markers related to this animation.' ) nla_tracks: CollectionProperty( # type: ignore type=MCBLEND_JustName ) # Material properties def list_mesh_types_as_blender_enum(self, context): '''List mesh types for EnumProperty.''' # pylint: disable=unused-argument return [(i.value, i.value, i.value) for i in MeshType] class MCBLEND_FakeRcMaterialProperties(bpy.types.PropertyGroup): ''' Pattern-material pair for MCBLEND_FakeRcProperties object. ''' pattern: StringProperty( # type: ignore name="", description="The bone name pattern for assigning material.", default="", maxlen=1024) material: StringProperty( # type: ignore name="", description="Name of the material used by this render controller", default="", maxlen=1024 ) class MCBLEND_FakeRcProperties(bpy.types.PropertyGroup): ''' Armature property group similar to Minecraft render controller used for generating Minecraft materials. 
''' texture: StringProperty( # type: ignore name="", description="Name of the texture used by this render controller", default="", maxlen=1024 ) materials: CollectionProperty( # type: ignore type=MCBLEND_FakeRcMaterialProperties, name='Materials') class MCBLEND_ObjectProperties(bpy.types.PropertyGroup): '''Custom properties of an object.''' # ARMATURE PROPERTIES (equivalent of minecraft model) model_name: StringProperty( # type: ignore name="", description="Name of the model", default="model", maxlen=1024 ) texture_template_resolution: IntProperty( # type: ignore name="Template texture resolution", description=( 'Sets the resolution of the template texture.' 'describes how many pixels on the image is represented by one ' 'texture_width or texture_height unit in model definition. ' 'The value of 1 gives the standard minecraft texture ' 'resolution.' ), default=1, min=1, soft_max=5, ) allow_expanding: BoolProperty( # type: ignore name="Allow Texture Expanding", description="Allows expanding texture during texture generation.", default=True, ) generate_texture: BoolProperty( # type: ignore name="Generate texture", description="Generates texture during UV mapping.", default=True, ) visible_bounds_offset: FloatVectorProperty( # type: ignore name="Visible bounds offset", description="visible_bounds_offset of the model", default=(0.0, 0.0, 0.0) ) visible_bounds_width: FloatProperty( # type: ignore name="Visible bounds width", description="visible_bounds_width of the model", default=1.0 ) visible_bounds_height: FloatProperty( # type: ignore name="Visible bounds height", description="visible_bounds_height of the model", default=1.0 ) texture_width: IntProperty( # type: ignore name="", description="Minecraft UV parameter width.", default=64, min=1 ) texture_height: IntProperty( # type: ignore name="", description=( "Minecraft UV parameter height. If you set it to 0 than the height" " of the texture will be picked automatically for you." 
), default=64, min=1 ) # RENDER CONTROLLERS (armature properties used for generating materials) render_controllers: CollectionProperty( # type: ignore type=MCBLEND_FakeRcProperties, name="Render Controllers" ) # ANIMATIONS # Animation properties active_animation: IntProperty(default=0) # type: ignore animations: CollectionProperty( # type: ignore type=MCBLEND_AnimationProperties) # CUBE PROPERTIES mirror: BoolProperty( # type: ignore name="Mirror", description="Defines how to layout the UV during UV generation.", default=False, ) uv_group: StringProperty( # type: ignore name="UV group", description=( "Objects with the same UV group can be mapped to the same spot on " "the texture if they have the same dimensions. Empty string means " "that the object doesn't belong to any UV group."), default="", maxlen=1024 ) inflate: FloatProperty( # type: ignore name="Inflate", description="The inflate value of this object.", default=0.0 ) mesh_type: EnumProperty( # type: ignore items=list_mesh_types_as_blender_enum, # type: ignore name='Mesh type') min_uv_size: IntVectorProperty( # type: ignore name="Min UV size", default=(0.0, 0.0, 0.0), min=0, description=( "The lower UV boundary of the length of X dimension of a cube. If " "it's greater than the actual X, then the UV-mapper will act as " "if the X were equal to this value.") ) class MCBLEND_BoneProperties(bpy.types.PropertyGroup): ''' Custom properties of a bone ''' binding: StringProperty( # type: ignore name="Binding", description="The equivalent of Minecraft binding property", default="", maxlen=1024 )
0.833528
0.247601
from typing import List, Tuple import torch from torch import nn from equideepdmri.layers.filter.filter_kernel import KernelDefinitionInterface from equideepdmri.utils.q_space import Q_SamplingSchema from equideepdmri.utils.spherical_tensor import SphericalTensorType class SumKernel(nn.Module): def __init__(self, type_out: SphericalTensorType, type_in: SphericalTensorType, Q_sampling_schema_out: Q_SamplingSchema, Q_sampling_schema_in: Q_SamplingSchema, P_diff_vectors: torch.Tensor, P_kernel_size: int, kernel_definitions: List[KernelDefinitionInterface]): super().__init__() self.kernels = nn.ModuleList([kernel_constructor(type_out, type_in, Q_sampling_schema_out, Q_sampling_schema_in, P_diff_vectors, P_kernel_size) for kernel_constructor in kernel_definitions]) def forward(self) -> torch.Tensor: """ :return: kernel (Q_out x Q_in x num_P_diff_vectors x type_out.dim x type_in.dim) """ # (N_kernels x Q_out x Q_in x num_P_diff_vectors x type_out.dim x type_in.dim) kernel_tensors = torch.stack([kernel() for kernel in self.kernels], dim=0) return kernel_tensors.sum(dim=0) / len(self.kernels) class ConcatKernel(nn.Module): def __init__(self, type_out: SphericalTensorType, type_in: SphericalTensorType, Q_sampling_schema_out: Q_SamplingSchema, Q_sampling_schema_in: Q_SamplingSchema, P_diff_vectors: torch.Tensor, P_kernel_size: int, kernel_definitions: List[Tuple[SphericalTensorType, KernelDefinitionInterface]]): """ :param type_out: :param type_in: :param Q_sampling_schema_out: :param Q_sampling_schema_in: :param P_diff_vectors: :param P_kernel_size: :param kernel_constructors: list of pairs (kernel_type_out, kernel_constructor) each representing a kernel to be concatenated. Note that all kernel_type_out concatenated need to be the same as type_out. 
""" super().__init__() result_type, self.concat_indices = SphericalTensorType.concat_tensor_types(*[kernel_type_out for kernel_type_out, _ in kernel_definitions]) assert result_type == type_out, f'The kernel output types ' \ f'{[kernel_type_out for kernel_type_out, _ in kernel_definitions]} ' \ f'cannot be concatenated to the type {type_out}' self.kernels = nn.ModuleList([kernel_definition(kernel_type_out, type_in, Q_sampling_schema_out, Q_sampling_schema_in, P_diff_vectors, P_kernel_size) for kernel_type_out, kernel_definition in kernel_definitions]) self.Q_out = Q_sampling_schema_out.Q self.Q_in = Q_sampling_schema_in.Q self.P_diff_vectors = P_diff_vectors self.num_P_diff_vectors, _ = P_diff_vectors.size() self.type_out = type_out self.type_in = type_in def forward(self) -> torch.Tensor: """ :return: kernel (Q_out x Q_in x num_P_diff_vectors x type_out.dim x type_in.dim) """ result_kernel = self.P_diff_vectors.new_zeros((self.Q_out, self.Q_in, self.num_P_diff_vectors, self.type_out.dim, self.type_in.dim)) kernel_tensors = [kernel() for kernel in self.kernels] for kernel_indices, kernel_tensor in zip(self.concat_indices, kernel_tensors): result_kernel[:, :, :, kernel_indices, :] = kernel_tensor return result_kernel
equideepdmri/layers/filter/combined_filter_kernels.py
from typing import List, Tuple import torch from torch import nn from equideepdmri.layers.filter.filter_kernel import KernelDefinitionInterface from equideepdmri.utils.q_space import Q_SamplingSchema from equideepdmri.utils.spherical_tensor import SphericalTensorType class SumKernel(nn.Module): def __init__(self, type_out: SphericalTensorType, type_in: SphericalTensorType, Q_sampling_schema_out: Q_SamplingSchema, Q_sampling_schema_in: Q_SamplingSchema, P_diff_vectors: torch.Tensor, P_kernel_size: int, kernel_definitions: List[KernelDefinitionInterface]): super().__init__() self.kernels = nn.ModuleList([kernel_constructor(type_out, type_in, Q_sampling_schema_out, Q_sampling_schema_in, P_diff_vectors, P_kernel_size) for kernel_constructor in kernel_definitions]) def forward(self) -> torch.Tensor: """ :return: kernel (Q_out x Q_in x num_P_diff_vectors x type_out.dim x type_in.dim) """ # (N_kernels x Q_out x Q_in x num_P_diff_vectors x type_out.dim x type_in.dim) kernel_tensors = torch.stack([kernel() for kernel in self.kernels], dim=0) return kernel_tensors.sum(dim=0) / len(self.kernels) class ConcatKernel(nn.Module): def __init__(self, type_out: SphericalTensorType, type_in: SphericalTensorType, Q_sampling_schema_out: Q_SamplingSchema, Q_sampling_schema_in: Q_SamplingSchema, P_diff_vectors: torch.Tensor, P_kernel_size: int, kernel_definitions: List[Tuple[SphericalTensorType, KernelDefinitionInterface]]): """ :param type_out: :param type_in: :param Q_sampling_schema_out: :param Q_sampling_schema_in: :param P_diff_vectors: :param P_kernel_size: :param kernel_constructors: list of pairs (kernel_type_out, kernel_constructor) each representing a kernel to be concatenated. Note that all kernel_type_out concatenated need to be the same as type_out. 
""" super().__init__() result_type, self.concat_indices = SphericalTensorType.concat_tensor_types(*[kernel_type_out for kernel_type_out, _ in kernel_definitions]) assert result_type == type_out, f'The kernel output types ' \ f'{[kernel_type_out for kernel_type_out, _ in kernel_definitions]} ' \ f'cannot be concatenated to the type {type_out}' self.kernels = nn.ModuleList([kernel_definition(kernel_type_out, type_in, Q_sampling_schema_out, Q_sampling_schema_in, P_diff_vectors, P_kernel_size) for kernel_type_out, kernel_definition in kernel_definitions]) self.Q_out = Q_sampling_schema_out.Q self.Q_in = Q_sampling_schema_in.Q self.P_diff_vectors = P_diff_vectors self.num_P_diff_vectors, _ = P_diff_vectors.size() self.type_out = type_out self.type_in = type_in def forward(self) -> torch.Tensor: """ :return: kernel (Q_out x Q_in x num_P_diff_vectors x type_out.dim x type_in.dim) """ result_kernel = self.P_diff_vectors.new_zeros((self.Q_out, self.Q_in, self.num_P_diff_vectors, self.type_out.dim, self.type_in.dim)) kernel_tensors = [kernel() for kernel in self.kernels] for kernel_indices, kernel_tensor in zip(self.concat_indices, kernel_tensors): result_kernel[:, :, :, kernel_indices, :] = kernel_tensor return result_kernel
0.963179
0.601389
# Get a permission denied error when running the script in the shell? chmod 755 the script .py file import sys import os import subprocess import shutil import http.client, urllib # COMIC_TAGGER_PATH = 'COMIC_TAGGER_PATH/Applications/ComicTagger.app/Contents/MacOS/ComicTagger' COMIC_TAGGER_PATH = "C:\\Program Files\\Comic Tagger\\comictagger.exe" # Possible path for Windows #COMIC_TAGGER_PATH = 'ComicTagger' # a local alias that points to the full path above, useful on MacOS HANDLED_EXTENSIONS = ['.cbr', '.cbz'] def escapeForShell(source): assert isinstance(source, str) return source.replace(' ', '\ ').replace('(', '\(').replace(')', '\)').replace("'", "\\'") class NotificationConfiguration: def __init__(self): self.app_token = "" self.user_key = "" class ArchiveRoute: """ Defines an archive metadata routing configuration """ metadataElement = "" metadataContent = "" target = "" def __init__(self, element, content, target): self.metadataElement = element self.metadataContent = content self.target = target def display(self): return "Metadata: {0} = {1}, target: {2}".format(self.metadataElement, self.metadataContent, self.target) class Configuration: """ Encapsulates the configuration options for ComicArchiveFiler """ configuration_path = "" target_path = "" send_notifications = False errors = list() pushover_configuration = NotificationConfiguration() routes = list() def __init__(self): arguments = sys.argv print(arguments) if len(arguments) < 3: # the sys.argv[0] contains the script name, so there is always at least one argument self.errors.append("Incorrect parameters!") for param in arguments[1:]: if param.startswith('-'): if param == '-n': self.send_notifications = True elif param.startswith("-pushover:"): pieces = param.split(":") self.pushover_configuration.app_token = pieces[1] self.pushover_configuration.user_key = pieces[2] else: self.errors.append("Unknown options {0}".format(param)) else: if self.configuration_path == "": self.configuration_path = param 
else: self.target_path = param if self.configuration_path == "": self.errors.append("You must specify a archive_path to a configuration file") else: if not os.path.exists(self.configuration_path): self.errors.append("Cannot locate configuration file path: {0}".format(self.configuration_path)) if self.target_path == "": self.errors.append("You must specify a target comic archive file") else: if not os.path.exists(self.target_path): self.errors.append("Cannot locate archive file path: {0}".format(self.target_path)) self.routes = self.readRoutingConfiguration(self.configuration_path) def valid(self): return len(self.errors) == 0 def readRoutingConfiguration(self, configuration_path): routes = list() with open(configuration_path) as f: lines = [line.rstrip('\n') for line in f if line != '' and line != '\n'] for line in lines: # print line pieces = line.split("->") if len(pieces) != 2: routing_configuration_error = "Routing configuration line must contain a '->': {0}".format(line) print(routing_configuration_error) if self.send_notifications: Notifications.pushNotification(self.pushover_configuration, routing_configuration_error, 1) quit() if ":" not in pieces[0]: print("Metadata specification must contain a ':' : {0}".format(pieces[0])) target = pieces[1].strip() metadata = [data.strip() for data in pieces[0].split(":", 1)] routes.append(ArchiveRoute(metadata[0], metadata[1], target)) return routes class Notifications: @staticmethod def pushNotification(pushover_configuration, message, priority = 0): # Pushover notification conn = http.client.HTTPSConnection("api.pushover.net:443") conn.request("POST", "/1/messages.json", urllib.urlencode({ "token": pushover_configuration.app_token, "user": pushover_configuration.user_key, "message": message, "priority": priority }), {"Content-type": "application/x-www-form-urlencoded"}) conn.getresponse() class ComicArchiveFiler: """ This class encapsulates the behaviour of the ComicArchiveFiler script """ def __init__(self): 
self.configuration = Configuration() if not self.configuration.valid(): for error in self.configuration.errors: print(error) self.outputHelp() return @staticmethod def outputHelp(): print('') print('Usage: ComicArchiveFiler [OPTIONS] <CONFIGURATIONFILE> <ARCHIVEFILE>') print('') print('Looks at the series metadata for a comic archive and move the file if a matching rule is found in the specified rule configuration file') print('') print('Options:') print(' -n : Send notifications') print(' -pushover:APP_TOKEN:USER_KEY') print('') @staticmethod def parseExistingTags(data): assert isinstance(data, str) # validate start_index = data.find('------ComicRack tags--------') if start_index == -1: start_index = data.find('--------- ComicRack tags ---------') if start_index == -1: return [] data = data[data.find('\n', start_index) + 1:] lines = data.splitlines() tags = {} for line in lines: if ':' not in line: continue pieces = line.split(':', 1) if len(pieces) > 1 and pieces[1] != '': tags[pieces[0]] = pieces[1].strip(' ') return tags @staticmethod def pushNotification(pushover_configuration, message, priority = 0): Notifications.pushNotification(pushover_configuration, message, priority) def processFile(self, file_path): assert isinstance(file_path, str) assert isinstance(self.configuration.send_notifications, bool) # check that file is a comic archive filename = os.path.split(file_path)[1] extension = os.path.splitext(file_path)[1] if extension not in HANDLED_EXTENSIONS: print("Skipping {0}. 
Not a recognised comic archive".format(filename)) return print("Processing: {0}".format(filename)) isWindows = os.name == 'nt' if isWindows: comicTaggerArgs = '"%s" -p %s' % (COMIC_TAGGER_PATH, escapeForShell(file_path)) else: comicTaggerArgs = '%s -p %s' % (COMIC_TAGGER_PATH, escapeForShell(file_path)) print(comicTaggerArgs) data = subprocess.run(args=comicTaggerArgs, capture_output=True, text=True) existing_tags = self.parseExistingTags(data.stdout) applicable_routes = [route for route in self.configuration.routes if route.metadataElement in existing_tags and existing_tags[ route.metadataElement].lower() == route.metadataContent.lower()] if len(applicable_routes) > 0: route = applicable_routes[0] print("Found matching route {0} for file {1}".format(route.display(), file_path)) # TODO: move file to route.target file_copied = False try: shutil.copy2(file_path, route.target) file_copied = True # send low priority notification that filing is complete if self.configuration.send_notifications: self.pushNotification(self.configuration.pushover_configuration, "Filed: {0}".format(filename), -1) except Exception: copy_error = "Error: Could not copy file {0} to {1}".format(file_path, route.target) print(copy_error) if self.configuration.send_notifications: self.pushNotification(self.configuration.pushover_configuration, copy_error, 1) pass if file_copied: try: os.remove(file_path) except Exception: delete_error = "Error: Could not delete file {0}".format(file_path) print(delete_error) if self.configuration.send_notifications: self.pushNotification(self.configuration.pushover_configuration, delete_error, 1) pass else: message = "Could not file {0}. 
No matching route found".format(filename) if self.configuration.send_notifications: self.pushNotification(self.configuration.pushover_configuration, message) else: print(message) def execute(self): if len(self.configuration.routes) < 1: print("Found no valid routing instructions in the configuration file") return if os.path.isdir(self.configuration.target_path): directory_list = os.listdir(self.configuration.target_path) for filename in directory_list: file_path = os.path.join(self.configuration.target_path, filename) if os.path.isfile(file_path): self.processFile(file_path) elif os.path.isfile(self.configuration.target_path): self.processFile(self.configuration.target_path) # Start of execution filer = ComicArchiveFiler() filer.execute()
ComicArchiveFiler.py
# Get a permission denied error when running the script in the shell? chmod 755 the script .py file import sys import os import subprocess import shutil import http.client, urllib # COMIC_TAGGER_PATH = 'COMIC_TAGGER_PATH/Applications/ComicTagger.app/Contents/MacOS/ComicTagger' COMIC_TAGGER_PATH = "C:\\Program Files\\Comic Tagger\\comictagger.exe" # Possible path for Windows #COMIC_TAGGER_PATH = 'ComicTagger' # a local alias that points to the full path above, useful on MacOS HANDLED_EXTENSIONS = ['.cbr', '.cbz'] def escapeForShell(source): assert isinstance(source, str) return source.replace(' ', '\ ').replace('(', '\(').replace(')', '\)').replace("'", "\\'") class NotificationConfiguration: def __init__(self): self.app_token = "" self.user_key = "" class ArchiveRoute: """ Defines an archive metadata routing configuration """ metadataElement = "" metadataContent = "" target = "" def __init__(self, element, content, target): self.metadataElement = element self.metadataContent = content self.target = target def display(self): return "Metadata: {0} = {1}, target: {2}".format(self.metadataElement, self.metadataContent, self.target) class Configuration: """ Encapsulates the configuration options for ComicArchiveFiler """ configuration_path = "" target_path = "" send_notifications = False errors = list() pushover_configuration = NotificationConfiguration() routes = list() def __init__(self): arguments = sys.argv print(arguments) if len(arguments) < 3: # the sys.argv[0] contains the script name, so there is always at least one argument self.errors.append("Incorrect parameters!") for param in arguments[1:]: if param.startswith('-'): if param == '-n': self.send_notifications = True elif param.startswith("-pushover:"): pieces = param.split(":") self.pushover_configuration.app_token = pieces[1] self.pushover_configuration.user_key = pieces[2] else: self.errors.append("Unknown options {0}".format(param)) else: if self.configuration_path == "": self.configuration_path = param 
else: self.target_path = param if self.configuration_path == "": self.errors.append("You must specify a archive_path to a configuration file") else: if not os.path.exists(self.configuration_path): self.errors.append("Cannot locate configuration file path: {0}".format(self.configuration_path)) if self.target_path == "": self.errors.append("You must specify a target comic archive file") else: if not os.path.exists(self.target_path): self.errors.append("Cannot locate archive file path: {0}".format(self.target_path)) self.routes = self.readRoutingConfiguration(self.configuration_path) def valid(self): return len(self.errors) == 0 def readRoutingConfiguration(self, configuration_path): routes = list() with open(configuration_path) as f: lines = [line.rstrip('\n') for line in f if line != '' and line != '\n'] for line in lines: # print line pieces = line.split("->") if len(pieces) != 2: routing_configuration_error = "Routing configuration line must contain a '->': {0}".format(line) print(routing_configuration_error) if self.send_notifications: Notifications.pushNotification(self.pushover_configuration, routing_configuration_error, 1) quit() if ":" not in pieces[0]: print("Metadata specification must contain a ':' : {0}".format(pieces[0])) target = pieces[1].strip() metadata = [data.strip() for data in pieces[0].split(":", 1)] routes.append(ArchiveRoute(metadata[0], metadata[1], target)) return routes class Notifications: @staticmethod def pushNotification(pushover_configuration, message, priority = 0): # Pushover notification conn = http.client.HTTPSConnection("api.pushover.net:443") conn.request("POST", "/1/messages.json", urllib.urlencode({ "token": pushover_configuration.app_token, "user": pushover_configuration.user_key, "message": message, "priority": priority }), {"Content-type": "application/x-www-form-urlencoded"}) conn.getresponse() class ComicArchiveFiler: """ This class encapsulates the behaviour of the ComicArchiveFiler script """ def __init__(self): 
self.configuration = Configuration() if not self.configuration.valid(): for error in self.configuration.errors: print(error) self.outputHelp() return @staticmethod def outputHelp(): print('') print('Usage: ComicArchiveFiler [OPTIONS] <CONFIGURATIONFILE> <ARCHIVEFILE>') print('') print('Looks at the series metadata for a comic archive and move the file if a matching rule is found in the specified rule configuration file') print('') print('Options:') print(' -n : Send notifications') print(' -pushover:APP_TOKEN:USER_KEY') print('') @staticmethod def parseExistingTags(data): assert isinstance(data, str) # validate start_index = data.find('------ComicRack tags--------') if start_index == -1: start_index = data.find('--------- ComicRack tags ---------') if start_index == -1: return [] data = data[data.find('\n', start_index) + 1:] lines = data.splitlines() tags = {} for line in lines: if ':' not in line: continue pieces = line.split(':', 1) if len(pieces) > 1 and pieces[1] != '': tags[pieces[0]] = pieces[1].strip(' ') return tags @staticmethod def pushNotification(pushover_configuration, message, priority = 0): Notifications.pushNotification(pushover_configuration, message, priority) def processFile(self, file_path): assert isinstance(file_path, str) assert isinstance(self.configuration.send_notifications, bool) # check that file is a comic archive filename = os.path.split(file_path)[1] extension = os.path.splitext(file_path)[1] if extension not in HANDLED_EXTENSIONS: print("Skipping {0}. 
Not a recognised comic archive".format(filename)) return print("Processing: {0}".format(filename)) isWindows = os.name == 'nt' if isWindows: comicTaggerArgs = '"%s" -p %s' % (COMIC_TAGGER_PATH, escapeForShell(file_path)) else: comicTaggerArgs = '%s -p %s' % (COMIC_TAGGER_PATH, escapeForShell(file_path)) print(comicTaggerArgs) data = subprocess.run(args=comicTaggerArgs, capture_output=True, text=True) existing_tags = self.parseExistingTags(data.stdout) applicable_routes = [route for route in self.configuration.routes if route.metadataElement in existing_tags and existing_tags[ route.metadataElement].lower() == route.metadataContent.lower()] if len(applicable_routes) > 0: route = applicable_routes[0] print("Found matching route {0} for file {1}".format(route.display(), file_path)) # TODO: move file to route.target file_copied = False try: shutil.copy2(file_path, route.target) file_copied = True # send low priority notification that filing is complete if self.configuration.send_notifications: self.pushNotification(self.configuration.pushover_configuration, "Filed: {0}".format(filename), -1) except Exception: copy_error = "Error: Could not copy file {0} to {1}".format(file_path, route.target) print(copy_error) if self.configuration.send_notifications: self.pushNotification(self.configuration.pushover_configuration, copy_error, 1) pass if file_copied: try: os.remove(file_path) except Exception: delete_error = "Error: Could not delete file {0}".format(file_path) print(delete_error) if self.configuration.send_notifications: self.pushNotification(self.configuration.pushover_configuration, delete_error, 1) pass else: message = "Could not file {0}. 
No matching route found".format(filename) if self.configuration.send_notifications: self.pushNotification(self.configuration.pushover_configuration, message) else: print(message) def execute(self): if len(self.configuration.routes) < 1: print("Found no valid routing instructions in the configuration file") return if os.path.isdir(self.configuration.target_path): directory_list = os.listdir(self.configuration.target_path) for filename in directory_list: file_path = os.path.join(self.configuration.target_path, filename) if os.path.isfile(file_path): self.processFile(file_path) elif os.path.isfile(self.configuration.target_path): self.processFile(self.configuration.target_path) # Start of execution filer = ComicArchiveFiler() filer.execute()
0.39712
0.14439
from __future__ import unicode_literals from django.db import models, migrations import fluent_contents.plugins.oembeditem.fields class Migration(migrations.Migration): dependencies = [ ('fluent_contents', '0001_initial'), ] operations = [ migrations.CreateModel( name='OEmbedWithCaptionItem', fields=[ ('contentitem_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='fluent_contents.ContentItem')), ('embed_url', fluent_contents.plugins.oembeditem.fields.OEmbedUrlField(help_text='Enter the URL of the online content to embed (e.g. a YouTube or Vimeo video, SlideShare presentation, etc..)', verbose_name='URL to embed')), ('embed_max_width', models.PositiveIntegerField(null=True, verbose_name='Max width', blank=True)), ('embed_max_height', models.PositiveIntegerField(null=True, verbose_name='Max height', blank=True)), ('type', models.CharField(max_length=20, null=True, editable=False, blank=True)), ('url', models.URLField(null=True, editable=False, blank=True)), ('title', models.CharField(max_length=512, null=True, editable=False, blank=True)), ('description', models.TextField(null=True, editable=False, blank=True)), ('author_name', models.CharField(max_length=255, null=True, editable=False, blank=True)), ('author_url', models.URLField(null=True, editable=False, blank=True)), ('provider_name', models.CharField(max_length=255, null=True, editable=False, blank=True)), ('provider_url', models.URLField(null=True, editable=False, blank=True)), ('thumbnail_url', models.URLField(null=True, editable=False, blank=True)), ('thumbnail_height', models.IntegerField(null=True, editable=False, blank=True)), ('thumbnail_width', models.IntegerField(null=True, editable=False, blank=True)), ('height', models.IntegerField(null=True, editable=False, blank=True)), ('width', models.IntegerField(null=True, editable=False, blank=True)), ('html', models.TextField(null=True, editable=False, blank=True)), ('caption', models.TextField(blank=True)), 
], options={ 'abstract': False, 'db_table': 'contentitem_oembed_with_caption_oembedwithcaptionitem', 'verbose_name': 'Online Media with Caption', }, bases=('fluent_contents.contentitem',), ), ]
icekit/plugins/oembed_with_caption/migrations/0001_initial.py
from __future__ import unicode_literals from django.db import models, migrations import fluent_contents.plugins.oembeditem.fields class Migration(migrations.Migration): dependencies = [ ('fluent_contents', '0001_initial'), ] operations = [ migrations.CreateModel( name='OEmbedWithCaptionItem', fields=[ ('contentitem_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='fluent_contents.ContentItem')), ('embed_url', fluent_contents.plugins.oembeditem.fields.OEmbedUrlField(help_text='Enter the URL of the online content to embed (e.g. a YouTube or Vimeo video, SlideShare presentation, etc..)', verbose_name='URL to embed')), ('embed_max_width', models.PositiveIntegerField(null=True, verbose_name='Max width', blank=True)), ('embed_max_height', models.PositiveIntegerField(null=True, verbose_name='Max height', blank=True)), ('type', models.CharField(max_length=20, null=True, editable=False, blank=True)), ('url', models.URLField(null=True, editable=False, blank=True)), ('title', models.CharField(max_length=512, null=True, editable=False, blank=True)), ('description', models.TextField(null=True, editable=False, blank=True)), ('author_name', models.CharField(max_length=255, null=True, editable=False, blank=True)), ('author_url', models.URLField(null=True, editable=False, blank=True)), ('provider_name', models.CharField(max_length=255, null=True, editable=False, blank=True)), ('provider_url', models.URLField(null=True, editable=False, blank=True)), ('thumbnail_url', models.URLField(null=True, editable=False, blank=True)), ('thumbnail_height', models.IntegerField(null=True, editable=False, blank=True)), ('thumbnail_width', models.IntegerField(null=True, editable=False, blank=True)), ('height', models.IntegerField(null=True, editable=False, blank=True)), ('width', models.IntegerField(null=True, editable=False, blank=True)), ('html', models.TextField(null=True, editable=False, blank=True)), ('caption', models.TextField(blank=True)), 
], options={ 'abstract': False, 'db_table': 'contentitem_oembed_with_caption_oembedwithcaptionitem', 'verbose_name': 'Online Media with Caption', }, bases=('fluent_contents.contentitem',), ), ]
0.619126
0.15374
def viterbi(tags, sent, transition, emission): lower_sent = [word.lower() for word in sent] # In the Stanford pseudo-code, tag_probs is 'viterbi' and actual_tags is 'backpointer' tag_probs = [{}] actual_tags = [{}] # Initialization step for tag in tags: # Multiply the probability that the first tag comes after a "." by the probability of the observation given # the tag. Also sentences start with "." tag_probs[0][tag] = transition["."].prob(tag) * emission[tag].prob(lower_sent[0]) actual_tags[0][tag] = None # Recursion step for index in range(1, len(lower_sent)): # Initialize tag probability dictionary (this_tag_prob) and backpointer dictionary (this_actual_tag) this_tag_prob = {} this_actual_tag = {} # Retrieve the probability dictionary for the previous observation. prev_tag_prob = tag_probs[-1] for tag in tags: # Determine the probability of each tag occurring and retrieve the most likely previous tag path given the # current tag. best_prev = max(prev_tag_prob.keys(), key=lambda prev_tag: prev_tag_prob[prev_tag] * transition[prev_tag].prob(tag) * emission[tag].prob(lower_sent[index])) this_actual_tag[tag] = best_prev # Using the most likely previous tag determine the probability of the current tag occurring. this_tag_prob[tag] = prev_tag_prob[best_prev] * transition[best_prev].prob(tag) * \ emission[tag].prob(lower_sent[index]) tag_probs.append(this_tag_prob) actual_tags.append(this_actual_tag) # Termination step prev_tag_prob = tag_probs[-1] # Repeat what was done previously but now looking for "." to mark the end of the sentence. 
best_prev = max(prev_tag_prob.keys(), key=lambda prev_tag: prev_tag_prob[prev_tag] * transition[prev_tag].prob(".")) best_tags_prob = prev_tag_prob[best_prev] * transition[best_prev].prob(".") # best_tags is the list of tags or hidden states that will be returned best_tags = [".", best_prev] # Go backwards through actual_tags to figure out best tag for each word # and populate best_tags actual_tags.reverse() this_best_tag = best_prev for tag in actual_tags: best_tags.append(tag[this_best_tag]) this_best_tag = tag[this_best_tag] # Reverse best_tags to match pos tags with word order best_tags.reverse() return {"predicted_tags": best_tags, "probability": best_tags_prob}
viterbi.py
def viterbi(tags, sent, transition, emission): lower_sent = [word.lower() for word in sent] # In the Stanford pseudo-code, tag_probs is 'viterbi' and actual_tags is 'backpointer' tag_probs = [{}] actual_tags = [{}] # Initialization step for tag in tags: # Multiply the probability that the first tag comes after a "." by the probability of the observation given # the tag. Also sentences start with "." tag_probs[0][tag] = transition["."].prob(tag) * emission[tag].prob(lower_sent[0]) actual_tags[0][tag] = None # Recursion step for index in range(1, len(lower_sent)): # Initialize tag probability dictionary (this_tag_prob) and backpointer dictionary (this_actual_tag) this_tag_prob = {} this_actual_tag = {} # Retrieve the probability dictionary for the previous observation. prev_tag_prob = tag_probs[-1] for tag in tags: # Determine the probability of each tag occurring and retrieve the most likely previous tag path given the # current tag. best_prev = max(prev_tag_prob.keys(), key=lambda prev_tag: prev_tag_prob[prev_tag] * transition[prev_tag].prob(tag) * emission[tag].prob(lower_sent[index])) this_actual_tag[tag] = best_prev # Using the most likely previous tag determine the probability of the current tag occurring. this_tag_prob[tag] = prev_tag_prob[best_prev] * transition[best_prev].prob(tag) * \ emission[tag].prob(lower_sent[index]) tag_probs.append(this_tag_prob) actual_tags.append(this_actual_tag) # Termination step prev_tag_prob = tag_probs[-1] # Repeat what was done previously but now looking for "." to mark the end of the sentence. 
best_prev = max(prev_tag_prob.keys(), key=lambda prev_tag: prev_tag_prob[prev_tag] * transition[prev_tag].prob(".")) best_tags_prob = prev_tag_prob[best_prev] * transition[best_prev].prob(".") # best_tags is the list of tags or hidden states that will be returned best_tags = [".", best_prev] # Go backwards through actual_tags to figure out best tag for each word # and populate best_tags actual_tags.reverse() this_best_tag = best_prev for tag in actual_tags: best_tags.append(tag[this_best_tag]) this_best_tag = tag[this_best_tag] # Reverse best_tags to match pos tags with word order best_tags.reverse() return {"predicted_tags": best_tags, "probability": best_tags_prob}
0.743634
0.560493
from datetime import datetime, timedelta

from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse, JsonResponse

import clients.models as clients
import directory.models as directory
from appconf.manager import SettingManager
from rmis_integration.client import Client
from slog.models import Log as slog

# Log types that are eligible for cleanup once older than the retention window.
CLEANUP_TYPES_LOG = (
    1, 2, 3, 4, 5, 6, 10, 16, 17, 18, 19, 20, 25, 27, 22, 23,
    100, 998, 999, 1001,
    2000, 2001, 2002, 2003, 2004, 2005, 2006,
    3000, 3001, 5000, 6000, 10000, 20000, 60001, 60003,
)


@login_required
@staff_member_required
def log(request):
    """Report the total log row count and how many rows are old enough to purge."""
    keep_days = SettingManager.get("max_log_store_days", "120", "i")
    cutoff = datetime.today() - timedelta(days=keep_days)
    payload = {
        "cnt": slog.objects.all().count(),
        "store_days": keep_days,
        "to_delete": slog.objects.filter(time__lt=cutoff, type__in=CLEANUP_TYPES_LOG).count(),
    }
    return JsonResponse(payload)


@login_required
@staff_member_required
def log_cleanup(request):
    """Delete purgeable log rows older than the retention window; return the count."""
    cutoff = datetime.today() - timedelta(days=SettingManager.get("max_log_store_days", "120", "i"))
    _, removed_by_model = slog.objects.filter(time__lt=cutoff, type__in=CLEANUP_TYPES_LOG).delete()
    return HttpResponse(str(removed_by_model.get("slog.Log", 0)), content_type="text/plain")


@login_required
@staff_member_required
def db(request):
    """Database report endpoint; currently always returns an empty list."""
    return JsonResponse([], safe=False)


@login_required
@staff_member_required
def rmis_check(request):
    """Check RMIS connectivity by resolving the organization and department ids."""
    rmis = Client()
    parts = (rmis.search_organization_id(check=True), rmis.search_dep_id(check=True))
    return HttpResponse(" ".join(parts), content_type="text/plain")


@login_required
@staff_member_required
def archive_without_directions(request):
    """Count archived cards with no directions; delete them when ?remove=1."""
    cards = clients.Card.objects.filter(napravleniya__isnull=True, is_archive=True)
    result = cards.count()
    if request.GET.get("remove", "0") == "1":
        _, removed_by_model = cards.delete()
        result = removed_by_model.get("clients.Card", 0)
    return HttpResponse(str(result), content_type="text/plain")


@login_required
@staff_member_required
def patients_without_cards(request):
    """Count individuals that have no card; delete them when ?remove=1."""
    individuals = clients.Individual.objects.filter(card__isnull=True)
    result = individuals.count()
    if request.GET.get("remove", "0") == "1":
        _, removed_by_model = individuals.delete()
        result = removed_by_model.get("clients.Individual", 0)
    return HttpResponse(str(result), content_type="text/plain")


@login_required
@staff_member_required
def sync_departments(request):
    """Synchronize departments from RMIS and report added/updated counts."""
    return HttpResponse(
        "Добавлено: %s. Обновлено: %s." % Client().department.sync_departments(),
        content_type="text/plain",
    )


@login_required
@staff_member_required
def sync_researches(request):
    """Backfill Researches.podrazdeleniye from each research's subgroup."""
    pending = directory.Researches.objects.filter(podrazdeleniye__isnull=True, subgroup__isnull=False)
    total = pending.count()
    for research in pending:
        research.podrazdeleniye = research.subgroup.podrazdeleniye
        research.save()
    return HttpResponse(str(total), content_type="text/plain")
health/views.py
from datetime import datetime, timedelta

from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse, JsonResponse

import clients.models as clients
import directory.models as directory
from appconf.manager import SettingManager
from rmis_integration.client import Client
from slog.models import Log as slog

# Log types that are eligible for cleanup once older than the retention window.
CLEANUP_TYPES_LOG = (
    1, 2, 3, 4, 5, 6, 10, 16, 17, 18, 19, 20, 25, 27, 22, 23,
    100, 998, 999, 1001,
    2000, 2001, 2002, 2003, 2004, 2005, 2006,
    3000, 3001, 5000, 6000, 10000, 20000, 60001, 60003,
)


@login_required
@staff_member_required
def log(request):
    """Report the total log row count and how many rows are old enough to purge."""
    keep_days = SettingManager.get("max_log_store_days", "120", "i")
    cutoff = datetime.today() - timedelta(days=keep_days)
    payload = {
        "cnt": slog.objects.all().count(),
        "store_days": keep_days,
        "to_delete": slog.objects.filter(time__lt=cutoff, type__in=CLEANUP_TYPES_LOG).count(),
    }
    return JsonResponse(payload)


@login_required
@staff_member_required
def log_cleanup(request):
    """Delete purgeable log rows older than the retention window; return the count."""
    cutoff = datetime.today() - timedelta(days=SettingManager.get("max_log_store_days", "120", "i"))
    _, removed_by_model = slog.objects.filter(time__lt=cutoff, type__in=CLEANUP_TYPES_LOG).delete()
    return HttpResponse(str(removed_by_model.get("slog.Log", 0)), content_type="text/plain")


@login_required
@staff_member_required
def db(request):
    """Database report endpoint; currently always returns an empty list."""
    return JsonResponse([], safe=False)


@login_required
@staff_member_required
def rmis_check(request):
    """Check RMIS connectivity by resolving the organization and department ids."""
    rmis = Client()
    parts = (rmis.search_organization_id(check=True), rmis.search_dep_id(check=True))
    return HttpResponse(" ".join(parts), content_type="text/plain")


@login_required
@staff_member_required
def archive_without_directions(request):
    """Count archived cards with no directions; delete them when ?remove=1."""
    cards = clients.Card.objects.filter(napravleniya__isnull=True, is_archive=True)
    result = cards.count()
    if request.GET.get("remove", "0") == "1":
        _, removed_by_model = cards.delete()
        result = removed_by_model.get("clients.Card", 0)
    return HttpResponse(str(result), content_type="text/plain")


@login_required
@staff_member_required
def patients_without_cards(request):
    """Count individuals that have no card; delete them when ?remove=1."""
    individuals = clients.Individual.objects.filter(card__isnull=True)
    result = individuals.count()
    if request.GET.get("remove", "0") == "1":
        _, removed_by_model = individuals.delete()
        result = removed_by_model.get("clients.Individual", 0)
    return HttpResponse(str(result), content_type="text/plain")


@login_required
@staff_member_required
def sync_departments(request):
    """Synchronize departments from RMIS and report added/updated counts."""
    return HttpResponse(
        "Добавлено: %s. Обновлено: %s." % Client().department.sync_departments(),
        content_type="text/plain",
    )


@login_required
@staff_member_required
def sync_researches(request):
    """Backfill Researches.podrazdeleniye from each research's subgroup."""
    pending = directory.Researches.objects.filter(podrazdeleniye__isnull=True, subgroup__isnull=False)
    total = pending.count()
    for research in pending:
        research.podrazdeleniye = research.subgroup.podrazdeleniye
        research.save()
    return HttpResponse(str(total), content_type="text/plain")
0.442637
0.073264
import logging

from googleads import ad_manager

import settings
from dfp.client import get_client
from dfp.exceptions import (
    BadSettingException,
    DFPObjectNotFound,
    MissingSettingException,
)

logger = logging.getLogger(__name__)


def create_advertiser(name):
    """
    Creates a DFP advertiser with name `name`.

    Args:
      name (str): the name of the DFP advertiser
    Returns:
      a dict: the created advertiser object (callers read its 'id' key)
    """
    dfp_client = get_client()
    company_service = dfp_client.GetService('CompanyService', version='v201811')

    advertisers_config = [
        {
            'name': name,
            'type': 'AD_NETWORK'
        }
    ]

    advertisers = company_service.createCompanies(advertisers_config)

    advertiser = advertisers[0]

    # Log every advertiser the service reports as created; with a single
    # config entry the loop leaves `advertiser` bound to that one entry.
    for advertiser in advertisers:
        logger.info(u'Created an advertiser with name "{name}" and '
                    'type "{type}".'.format(name=advertiser['name'],
                                            type=advertiser['type']))

    return advertiser


def get_advertiser_id_by_name(name):
    """
    Returns a DFP company ID from company name.

    Args:
      name (str): the name of the DFP advertiser
    Returns:
      an integer: the advertiser's DFP ID
    Raises:
      DFPObjectNotFound: no advertiser matches `name` and
        DFP_CREATE_ADVERTISER_IF_DOES_NOT_EXIST is not enabled.
      BadSettingException: more than one advertiser matches `name`.
    """
    dfp_client = get_client()
    company_service = dfp_client.GetService('CompanyService', version='v201811')

    # Filter by name.
    query = 'WHERE name = :name'
    values = [
        {'key': 'name',
         'value': {
             'xsi_type': 'TextValue',
             'value': name
         }},
    ]
    statement = ad_manager.FilterStatement(query, values)
    response = company_service.getCompaniesByStatement(statement.ToStatement())

    # A company is required.  The response may lack a 'results' key entirely,
    # so treat a missing/empty result set the same way.
    try:
        no_company_found = len(response['results']) < 1
    except (AttributeError, KeyError):
        no_company_found = True

    if no_company_found:
        if getattr(settings, 'DFP_CREATE_ADVERTISER_IF_DOES_NOT_EXIST', False):
            advertiser = create_advertiser(name)
        else:
            raise DFPObjectNotFound('No advertiser found with name {0}'.format(name))
    elif len(response['results']) > 1:
        raise BadSettingException(
            'Multiple advertisers found with name {0}'.format(name))
    else:
        advertiser = response['results'][0]
        logger.info(u'Using existing advertiser with name "{name}" and '
                    'type "{type}".'.format(name=advertiser['name'],
                                            type=advertiser['type']))

    return advertiser['id']


def main():
    """
    Gets the company name from settings and fetches its ID.

    Returns:
      an integer: the company's DFP ID
    Raises:
      MissingSettingException: DFP_ADVERTISER_NAME is not configured.
    """
    advertiser_name = getattr(settings, 'DFP_ADVERTISER_NAME', None)
    if advertiser_name is None:
        raise MissingSettingException('DFP_ADVERTISER_NAME')
    return get_advertiser_id_by_name(advertiser_name)


if __name__ == '__main__':
    main()
dfp/get_advertisers.py
import logging

from googleads import ad_manager

import settings
from dfp.client import get_client
from dfp.exceptions import (
    BadSettingException,
    DFPObjectNotFound,
    MissingSettingException,
)

logger = logging.getLogger(__name__)


def create_advertiser(name):
    """
    Creates a DFP advertiser with name `name`.

    Args:
      name (str): the name of the DFP advertiser
    Returns:
      a dict: the created advertiser object (callers read its 'id' key)
    """
    dfp_client = get_client()
    company_service = dfp_client.GetService('CompanyService', version='v201811')

    advertisers_config = [
        {
            'name': name,
            'type': 'AD_NETWORK'
        }
    ]

    advertisers = company_service.createCompanies(advertisers_config)

    advertiser = advertisers[0]

    # Log every advertiser the service reports as created; with a single
    # config entry the loop leaves `advertiser` bound to that one entry.
    for advertiser in advertisers:
        logger.info(u'Created an advertiser with name "{name}" and '
                    'type "{type}".'.format(name=advertiser['name'],
                                            type=advertiser['type']))

    return advertiser


def get_advertiser_id_by_name(name):
    """
    Returns a DFP company ID from company name.

    Args:
      name (str): the name of the DFP advertiser
    Returns:
      an integer: the advertiser's DFP ID
    Raises:
      DFPObjectNotFound: no advertiser matches `name` and
        DFP_CREATE_ADVERTISER_IF_DOES_NOT_EXIST is not enabled.
      BadSettingException: more than one advertiser matches `name`.
    """
    dfp_client = get_client()
    company_service = dfp_client.GetService('CompanyService', version='v201811')

    # Filter by name.
    query = 'WHERE name = :name'
    values = [
        {'key': 'name',
         'value': {
             'xsi_type': 'TextValue',
             'value': name
         }},
    ]
    statement = ad_manager.FilterStatement(query, values)
    response = company_service.getCompaniesByStatement(statement.ToStatement())

    # A company is required.  The response may lack a 'results' key entirely,
    # so treat a missing/empty result set the same way.
    try:
        no_company_found = len(response['results']) < 1
    except (AttributeError, KeyError):
        no_company_found = True

    if no_company_found:
        if getattr(settings, 'DFP_CREATE_ADVERTISER_IF_DOES_NOT_EXIST', False):
            advertiser = create_advertiser(name)
        else:
            raise DFPObjectNotFound('No advertiser found with name {0}'.format(name))
    elif len(response['results']) > 1:
        raise BadSettingException(
            'Multiple advertisers found with name {0}'.format(name))
    else:
        advertiser = response['results'][0]
        logger.info(u'Using existing advertiser with name "{name}" and '
                    'type "{type}".'.format(name=advertiser['name'],
                                            type=advertiser['type']))

    return advertiser['id']


def main():
    """
    Gets the company name from settings and fetches its ID.

    Returns:
      an integer: the company's DFP ID
    Raises:
      MissingSettingException: DFP_ADVERTISER_NAME is not configured.
    """
    advertiser_name = getattr(settings, 'DFP_ADVERTISER_NAME', None)
    if advertiser_name is None:
        raise MissingSettingException('DFP_ADVERTISER_NAME')
    return get_advertiser_id_by_name(advertiser_name)


if __name__ == '__main__':
    main()
0.60743
0.088662
import base64
import json

import pytest
from pytest import fixture

from chalice import app
from chalice import NotFoundError


def create_event(uri, method, path, content_type='application/json'):
    """Build a minimal API Gateway-style event for the given route."""
    return {
        'context': {
            'http-method': method,
            'resource-path': uri,
        },
        'params': {
            'header': {
                'Content-Type': content_type,
            },
            'path': path,
            'querystring': {},
        },
        'body-json': {},
        'base64-body': "",
        'stage-variables': {},
    }


def create_event_with_body(body, uri='/', method='POST',
                           content_type='application/json'):
    """Build an event carrying `body`, base64-encoded as API Gateway would."""
    event = create_event(uri, method, {}, content_type)
    event['body-json'] = body
    raw = json.dumps(body) if content_type == 'application/json' else body
    event['base64-body'] = base64.b64encode(raw)
    return event


@fixture
def sample_app():
    demo = app.Chalice('demo-app')

    @demo.route('/index', methods=['GET'])
    def index():
        return {'hello': 'world'}

    @demo.route('/name/{name}', methods=['GET'])
    def name(name):
        return {'provided-name': name}

    return demo


def test_can_parse_route_view_args():
    entry = app.RouteEntry(lambda: {"foo": "bar"}, 'view-name',
                           '/foo/{bar}/baz/{qux}', methods=['GET'])
    assert entry.view_args == ['bar', 'qux']


def test_can_route_single_view():
    demo = app.Chalice('app-name')

    @demo.route('/index')
    def index_view():
        return {}

    assert demo.routes['/index'] == app.RouteEntry(
        index_view, 'index_view', '/index', ['GET'],
        content_types=['application/json'])


def test_can_handle_multiple_routes():
    demo = app.Chalice('app-name')

    @demo.route('/index')
    def index_view():
        return {}

    @demo.route('/other')
    def other_view():
        return {}

    assert len(demo.routes) == 2, demo.routes
    assert '/index' in demo.routes, demo.routes
    assert '/other' in demo.routes, demo.routes
    assert demo.routes['/index'].view_function == index_view
    assert demo.routes['/other'].view_function == other_view


def test_error_on_unknown_event(sample_app):
    bad_event = {'random': 'event'}
    with pytest.raises(app.ChaliceError):
        sample_app(bad_event, context=None)


def test_can_route_api_call_to_view_function(sample_app):
    event = create_event('/index', 'GET', {})
    response = sample_app(event, context=None)
    assert response == {'hello': 'world'}


def test_can_call_to_dict_on_current_request(sample_app):
    @sample_app.route('/todict')
    def todict():
        return sample_app.current_request.to_dict()

    event = create_event('/todict', 'GET', {})
    response = sample_app(event, context=None)
    assert isinstance(response, dict)
    # The dict can change over time, so only a few keys are spot-checked.
    assert response['method'] == 'GET'
    assert response['json_body'] == {}
    # to_dict() must stay JSON serializable; round-trip it to verify.
    assert isinstance(json.loads(json.dumps(response)), dict)


def test_will_pass_captured_params_to_view(sample_app):
    event = create_event('/name/{name}', 'GET', {'name': 'james'})
    response = sample_app(event, context=None)
    assert response == {'provided-name': 'james'}


def test_error_on_unsupported_method(sample_app):
    event = create_event('/name/{name}', 'POST', {'name': 'james'})
    with pytest.raises(app.ChaliceError):
        sample_app(event, context=None)


def test_no_view_function_found(sample_app):
    bad_path = create_event('/noexist', 'GET', {})
    with pytest.raises(app.ChaliceError):
        sample_app(bad_path, context=None)


def test_can_access_raw_body():
    demo = app.Chalice('app-name')

    @demo.route('/index')
    def index_view():
        return {'rawbody': demo.current_request.raw_body}

    event = create_event('/index', 'GET', {})
    event['base64-body'] = base64.b64encode('{"hello": "world"}')
    result = demo(event, context=None)
    assert result == {'rawbody': '{"hello": "world"}'}


def test_raw_body_cache_returns_same_result():
    demo = app.Chalice('app-name')

    @demo.route('/index')
    def index_view():
        # The first raw_body access decodes base64; the second should hit
        # the cache.  Both must produce the same value.
        return {'rawbody': demo.current_request.raw_body,
                'rawbody2': demo.current_request.raw_body}

    event = create_event('/index', 'GET', {})
    event['base64-body'] = base64.b64encode('{"hello": "world"}')
    result = demo(event, context=None)
    assert result['rawbody'] == result['rawbody2']


def test_error_on_duplicate_routes():
    demo = app.Chalice('app-name')

    @demo.route('/index', methods=['PUT'])
    def index_view():
        return {'foo': 'bar'}

    with pytest.raises(ValueError):
        @demo.route('/index', methods=['POST'])
        def index_post():
            return {'foo': 'bar'}


def test_json_body_available_with_right_content_type():
    demo = app.Chalice('demo-app')

    @demo.route('/', methods=['POST'])
    def index():
        return demo.current_request.json_body

    event = create_event('/', 'POST', {})
    event['body-json'] = {'foo': 'bar'}
    result = demo(event, context=None)
    assert result == event['body-json']


def test_cant_access_json_body_with_wrong_content_type():
    demo = app.Chalice('demo-app')

    @demo.route('/', methods=['POST'], content_types=['application/xml'])
    def index():
        return (demo.current_request.json_body,
                demo.current_request.raw_body)

    event = create_event('/', 'POST', {}, content_type='application/xml')
    event['body-json'] = '<Message>hello</Message>'
    event['base64-body'] = base64.b64encode('<Message>hello</Message>')
    json_body, raw_body = demo(event, context=None)
    assert json_body is None
    assert raw_body == '<Message>hello</Message>'


def test_json_body_available_on_multiple_content_types():
    demo = app.Chalice('demo-app')

    @demo.route('/', methods=['POST'],
                content_types=['application/xml', 'application/json'])
    def index():
        return (demo.current_request.json_body,
                demo.current_request.raw_body)

    event = create_event_with_body('<Message>hello</Message>',
                                   content_type='application/xml')
    json_body, raw_body = demo(event, context=None)
    assert json_body is None
    assert raw_body == '<Message>hello</Message>'

    # With a JSON payload, .json_body becomes available as well.
    event = create_event_with_body({'foo': 'bar'},
                                   content_type='application/json')
    json_body, raw_body = demo(event, context=None)
    assert json_body == {'foo': 'bar'}
    assert raw_body == '{"foo": "bar"}'


def test_json_body_available_with_lowercase_content_type_key():
    demo = app.Chalice('demo-app')

    @demo.route('/', methods=['POST'])
    def index():
        return (demo.current_request.json_body,
                demo.current_request.raw_body)

    event = create_event_with_body({'foo': 'bar'})
    del event['params']['header']['Content-Type']
    event['params']['header']['content-type'] = 'application/json'
    json_body, raw_body = demo(event, context=None)
    assert json_body == {'foo': 'bar'}
    assert raw_body == '{"foo": "bar"}'


def test_content_types_must_be_lists():
    demo = app.Chalice('app-name')
    with pytest.raises(ValueError):
        @demo.route('/index', content_types='application/not-a-list')
        def index_post():
            return {'foo': 'bar'}


def test_route_equality():
    view_function = lambda: {"hello": "world"}
    a = app.RouteEntry(
        view_function,
        view_name='myview', path='/',
        methods=['GET'],
        authorization_type='foo',
        api_key_required=True,
        content_types=['application/json'],
    )
    b = app.RouteEntry(
        view_function,
        view_name='myview', path='/',
        methods=['GET'],
        authorization_type='foo',
        api_key_required=True,
        content_types=['application/json'],
    )
    assert a == b


def test_route_inequality():
    view_function = lambda: {"hello": "world"}
    a = app.RouteEntry(
        view_function,
        view_name='myview', path='/',
        methods=['GET'],
        authorization_type='foo',
        api_key_required=True,
        content_types=['application/json'],
    )
    b = app.RouteEntry(
        view_function,
        view_name='myview', path='/',
        methods=['GET'],
        authorization_type='foo',
        api_key_required=True,
        # Different content types
        content_types=['application/xml'],
    )
    assert not a == b


def test_exceptions_raised_as_chalice_errors(sample_app):
    @sample_app.route('/error')
    def raise_error():
        raise TypeError("Raising arbitrary error, should never see.")

    event = create_event('/error', 'GET', {})
    # Intentional behavior: outside of debug mode internal errors must not
    # leak; a general internal server error is raised instead.
    with pytest.raises(app.ChaliceViewError):
        sample_app(event, context=None)


def test_original_exception_raised_in_debug_mode(sample_app):
    sample_app.debug = True

    @sample_app.route('/error')
    def raise_error():
        raise ValueError("You will see this error")

    event = create_event('/error', 'GET', {})
    with pytest.raises(ValueError) as e:
        sample_app(event, context=None)
    # Debug mode propagates the original exception type and message.
    assert str(e.value) == 'You will see this error'


def test_chalice_view_errors_propagate_in_non_debug_mode(sample_app):
    @sample_app.route('/notfound')
    def notfound():
        raise NotFoundError("resource not found")

    event = create_event('/notfound', 'GET', {})
    with pytest.raises(NotFoundError):
        sample_app(event, context=None)


def test_chalice_view_errors_propagate_in_debug_mode(sample_app):
    @sample_app.route('/notfound')
    def notfound():
        raise NotFoundError("resource not found")

    sample_app.debug = True
    event = create_event('/notfound', 'GET', {})
    with pytest.raises(NotFoundError):
        sample_app(event, context=None)


def test_case_insensitive_mapping():
    mapping = app.CaseInsensitiveMapping({'HEADER': 'Value'})
    assert mapping['hEAdEr']
    assert mapping.get('hEAdEr')
    assert 'hEAdEr' in mapping
    assert repr({'header': 'Value'}) in repr(mapping)


def test_unknown_kwargs_raise_error(sample_app):
    with pytest.raises(TypeError):
        @sample_app.route('/foo', unknown_kwargs='foo')
        def badkwargs():
            pass
tests/unit/test_app.py
import base64
import json

import pytest
from pytest import fixture

from chalice import app
from chalice import NotFoundError


def create_event(uri, method, path, content_type='application/json'):
    """Build a minimal API Gateway-style event for the given route."""
    return {
        'context': {
            'http-method': method,
            'resource-path': uri,
        },
        'params': {
            'header': {
                'Content-Type': content_type,
            },
            'path': path,
            'querystring': {},
        },
        'body-json': {},
        'base64-body': "",
        'stage-variables': {},
    }


def create_event_with_body(body, uri='/', method='POST',
                           content_type='application/json'):
    """Build an event carrying `body`, base64-encoded as API Gateway would."""
    event = create_event(uri, method, {}, content_type)
    event['body-json'] = body
    raw = json.dumps(body) if content_type == 'application/json' else body
    event['base64-body'] = base64.b64encode(raw)
    return event


@fixture
def sample_app():
    demo = app.Chalice('demo-app')

    @demo.route('/index', methods=['GET'])
    def index():
        return {'hello': 'world'}

    @demo.route('/name/{name}', methods=['GET'])
    def name(name):
        return {'provided-name': name}

    return demo


def test_can_parse_route_view_args():
    entry = app.RouteEntry(lambda: {"foo": "bar"}, 'view-name',
                           '/foo/{bar}/baz/{qux}', methods=['GET'])
    assert entry.view_args == ['bar', 'qux']


def test_can_route_single_view():
    demo = app.Chalice('app-name')

    @demo.route('/index')
    def index_view():
        return {}

    assert demo.routes['/index'] == app.RouteEntry(
        index_view, 'index_view', '/index', ['GET'],
        content_types=['application/json'])


def test_can_handle_multiple_routes():
    demo = app.Chalice('app-name')

    @demo.route('/index')
    def index_view():
        return {}

    @demo.route('/other')
    def other_view():
        return {}

    assert len(demo.routes) == 2, demo.routes
    assert '/index' in demo.routes, demo.routes
    assert '/other' in demo.routes, demo.routes
    assert demo.routes['/index'].view_function == index_view
    assert demo.routes['/other'].view_function == other_view


def test_error_on_unknown_event(sample_app):
    bad_event = {'random': 'event'}
    with pytest.raises(app.ChaliceError):
        sample_app(bad_event, context=None)


def test_can_route_api_call_to_view_function(sample_app):
    event = create_event('/index', 'GET', {})
    response = sample_app(event, context=None)
    assert response == {'hello': 'world'}


def test_can_call_to_dict_on_current_request(sample_app):
    @sample_app.route('/todict')
    def todict():
        return sample_app.current_request.to_dict()

    event = create_event('/todict', 'GET', {})
    response = sample_app(event, context=None)
    assert isinstance(response, dict)
    # The dict can change over time, so only a few keys are spot-checked.
    assert response['method'] == 'GET'
    assert response['json_body'] == {}
    # to_dict() must stay JSON serializable; round-trip it to verify.
    assert isinstance(json.loads(json.dumps(response)), dict)


def test_will_pass_captured_params_to_view(sample_app):
    event = create_event('/name/{name}', 'GET', {'name': 'james'})
    response = sample_app(event, context=None)
    assert response == {'provided-name': 'james'}


def test_error_on_unsupported_method(sample_app):
    event = create_event('/name/{name}', 'POST', {'name': 'james'})
    with pytest.raises(app.ChaliceError):
        sample_app(event, context=None)


def test_no_view_function_found(sample_app):
    bad_path = create_event('/noexist', 'GET', {})
    with pytest.raises(app.ChaliceError):
        sample_app(bad_path, context=None)


def test_can_access_raw_body():
    demo = app.Chalice('app-name')

    @demo.route('/index')
    def index_view():
        return {'rawbody': demo.current_request.raw_body}

    event = create_event('/index', 'GET', {})
    event['base64-body'] = base64.b64encode('{"hello": "world"}')
    result = demo(event, context=None)
    assert result == {'rawbody': '{"hello": "world"}'}


def test_raw_body_cache_returns_same_result():
    demo = app.Chalice('app-name')

    @demo.route('/index')
    def index_view():
        # The first raw_body access decodes base64; the second should hit
        # the cache.  Both must produce the same value.
        return {'rawbody': demo.current_request.raw_body,
                'rawbody2': demo.current_request.raw_body}

    event = create_event('/index', 'GET', {})
    event['base64-body'] = base64.b64encode('{"hello": "world"}')
    result = demo(event, context=None)
    assert result['rawbody'] == result['rawbody2']


def test_error_on_duplicate_routes():
    demo = app.Chalice('app-name')

    @demo.route('/index', methods=['PUT'])
    def index_view():
        return {'foo': 'bar'}

    with pytest.raises(ValueError):
        @demo.route('/index', methods=['POST'])
        def index_post():
            return {'foo': 'bar'}


def test_json_body_available_with_right_content_type():
    demo = app.Chalice('demo-app')

    @demo.route('/', methods=['POST'])
    def index():
        return demo.current_request.json_body

    event = create_event('/', 'POST', {})
    event['body-json'] = {'foo': 'bar'}
    result = demo(event, context=None)
    assert result == event['body-json']


def test_cant_access_json_body_with_wrong_content_type():
    demo = app.Chalice('demo-app')

    @demo.route('/', methods=['POST'], content_types=['application/xml'])
    def index():
        return (demo.current_request.json_body,
                demo.current_request.raw_body)

    event = create_event('/', 'POST', {}, content_type='application/xml')
    event['body-json'] = '<Message>hello</Message>'
    event['base64-body'] = base64.b64encode('<Message>hello</Message>')
    json_body, raw_body = demo(event, context=None)
    assert json_body is None
    assert raw_body == '<Message>hello</Message>'


def test_json_body_available_on_multiple_content_types():
    demo = app.Chalice('demo-app')

    @demo.route('/', methods=['POST'],
                content_types=['application/xml', 'application/json'])
    def index():
        return (demo.current_request.json_body,
                demo.current_request.raw_body)

    event = create_event_with_body('<Message>hello</Message>',
                                   content_type='application/xml')
    json_body, raw_body = demo(event, context=None)
    assert json_body is None
    assert raw_body == '<Message>hello</Message>'

    # With a JSON payload, .json_body becomes available as well.
    event = create_event_with_body({'foo': 'bar'},
                                   content_type='application/json')
    json_body, raw_body = demo(event, context=None)
    assert json_body == {'foo': 'bar'}
    assert raw_body == '{"foo": "bar"}'


def test_json_body_available_with_lowercase_content_type_key():
    demo = app.Chalice('demo-app')

    @demo.route('/', methods=['POST'])
    def index():
        return (demo.current_request.json_body,
                demo.current_request.raw_body)

    event = create_event_with_body({'foo': 'bar'})
    del event['params']['header']['Content-Type']
    event['params']['header']['content-type'] = 'application/json'
    json_body, raw_body = demo(event, context=None)
    assert json_body == {'foo': 'bar'}
    assert raw_body == '{"foo": "bar"}'


def test_content_types_must_be_lists():
    demo = app.Chalice('app-name')
    with pytest.raises(ValueError):
        @demo.route('/index', content_types='application/not-a-list')
        def index_post():
            return {'foo': 'bar'}


def test_route_equality():
    view_function = lambda: {"hello": "world"}
    a = app.RouteEntry(
        view_function,
        view_name='myview', path='/',
        methods=['GET'],
        authorization_type='foo',
        api_key_required=True,
        content_types=['application/json'],
    )
    b = app.RouteEntry(
        view_function,
        view_name='myview', path='/',
        methods=['GET'],
        authorization_type='foo',
        api_key_required=True,
        content_types=['application/json'],
    )
    assert a == b


def test_route_inequality():
    view_function = lambda: {"hello": "world"}
    a = app.RouteEntry(
        view_function,
        view_name='myview', path='/',
        methods=['GET'],
        authorization_type='foo',
        api_key_required=True,
        content_types=['application/json'],
    )
    b = app.RouteEntry(
        view_function,
        view_name='myview', path='/',
        methods=['GET'],
        authorization_type='foo',
        api_key_required=True,
        # Different content types
        content_types=['application/xml'],
    )
    assert not a == b


def test_exceptions_raised_as_chalice_errors(sample_app):
    @sample_app.route('/error')
    def raise_error():
        raise TypeError("Raising arbitrary error, should never see.")

    event = create_event('/error', 'GET', {})
    # Intentional behavior: outside of debug mode internal errors must not
    # leak; a general internal server error is raised instead.
    with pytest.raises(app.ChaliceViewError):
        sample_app(event, context=None)


def test_original_exception_raised_in_debug_mode(sample_app):
    sample_app.debug = True

    @sample_app.route('/error')
    def raise_error():
        raise ValueError("You will see this error")

    event = create_event('/error', 'GET', {})
    with pytest.raises(ValueError) as e:
        sample_app(event, context=None)
    # Debug mode propagates the original exception type and message.
    assert str(e.value) == 'You will see this error'


def test_chalice_view_errors_propagate_in_non_debug_mode(sample_app):
    @sample_app.route('/notfound')
    def notfound():
        raise NotFoundError("resource not found")

    event = create_event('/notfound', 'GET', {})
    with pytest.raises(NotFoundError):
        sample_app(event, context=None)


def test_chalice_view_errors_propagate_in_debug_mode(sample_app):
    @sample_app.route('/notfound')
    def notfound():
        raise NotFoundError("resource not found")

    sample_app.debug = True
    event = create_event('/notfound', 'GET', {})
    with pytest.raises(NotFoundError):
        sample_app(event, context=None)


def test_case_insensitive_mapping():
    mapping = app.CaseInsensitiveMapping({'HEADER': 'Value'})
    assert mapping['hEAdEr']
    assert mapping.get('hEAdEr')
    assert 'hEAdEr' in mapping
    assert repr({'header': 'Value'}) in repr(mapping)


def test_unknown_kwargs_raise_error(sample_app):
    with pytest.raises(TypeError):
        @sample_app.route('/foo', unknown_kwargs='foo')
        def badkwargs():
            pass
0.506836
0.198938
from tests import IntegrationTestCase
from tests.holodeck import Request
from twilio.base.exceptions import TwilioException
from twilio.http.response import Response


class PublishedTrackTestCase(IntegrationTestCase):
    """Integration tests for the PublishedTracks resource (holodeck-mocked)."""

    def test_fetch_request(self):
        # With a mocked 500, fetch() must raise but still issue the request.
        self.holodeck.mock(Response(500, ''))

        with self.assertRaises(TwilioException):
            (self.client.video.v1
                 .rooms(sid="RMXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
                 .participants(sid="PAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
                 .published_tracks(sid="MTXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
                 .fetch())

        self.holodeck.assert_has_request(Request(
            'get',
            'https://video.twilio.com/v1/Rooms/RMXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Participants/PAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/PublishedTracks/MTXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
        ))

    def test_fetch_response(self):
        # A mocked 200 payload must be parsed into a non-None instance.
        self.holodeck.mock(Response(
            200,
            '''
            {
                "room_sid": "RMaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "date_created": "2015-07-30T20:00:00Z",
                "date_updated": "2015-07-30T20:00:00Z",
                "participant_sid": "PAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "sid": "MTaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "name": "bob-track",
                "kind": "data",
                "enabled": true,
                "url": "https://video.twilio.com/v1/Rooms/RMaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Participants/PAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/PublishedTracks/MTaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
            }
            '''
        ))

        actual = (self.client.video.v1
                      .rooms(sid="RMXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
                      .participants(sid="PAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
                      .published_tracks(sid="MTXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
                      .fetch())

        self.assertIsNotNone(actual)

    def test_list_request(self):
        # With a mocked 500, list() must raise but still issue the request.
        self.holodeck.mock(Response(500, ''))

        with self.assertRaises(TwilioException):
            (self.client.video.v1
                 .rooms(sid="RMXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
                 .participants(sid="PAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
                 .published_tracks.list())

        self.holodeck.assert_has_request(Request(
            'get',
            'https://video.twilio.com/v1/Rooms/RMXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Participants/PAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/PublishedTracks',
        ))

    def test_read_empty_response(self):
        # An empty page must still parse into a non-None list result.
        self.holodeck.mock(Response(
            200,
            '''
            {
                "published_tracks": [],
                "meta": {
                    "page": 0,
                    "page_size": 50,
                    "first_page_url": "https://video.twilio.com/v1/Rooms/RMaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Participants/PAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/PublishedTracks?PageSize=50&Page=0",
                    "previous_page_url": null,
                    "url": "https://video.twilio.com/v1/Rooms/RMaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Participants/PAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/PublishedTracks?PageSize=50&Page=0",
                    "next_page_url": null,
                    "key": "published_tracks"
                }
            }
            '''
        ))

        actual = (self.client.video.v1
                      .rooms(sid="RMXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
                      .participants(sid="PAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
                      .published_tracks.list())

        self.assertIsNotNone(actual)
tests/integration/video/v1/room/room_participant/test_room_participant_published_track.py
from tests import IntegrationTestCase from tests.holodeck import Request from twilio.base.exceptions import TwilioException from twilio.http.response import Response class PublishedTrackTestCase(IntegrationTestCase): def test_fetch_request(self): self.holodeck.mock(Response(500, '')) with self.assertRaises(TwilioException): self.client.video.v1.rooms(sid="RMXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \ .participants(sid="PAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \ .published_tracks(sid="MTXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch() self.holodeck.assert_has_request(Request( 'get', 'https://video.twilio.com/v1/Rooms/RMXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Participants/PAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/PublishedTracks/MTXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX', )) def test_fetch_response(self): self.holodeck.mock(Response( 200, ''' { "room_sid": "RMaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "date_created": "2015-07-30T20:00:00Z", "date_updated": "2015-07-30T20:00:00Z", "participant_sid": "PAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "sid": "MTaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "name": "bob-track", "kind": "data", "enabled": true, "url": "https://video.twilio.com/v1/Rooms/RMaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Participants/PAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/PublishedTracks/MTaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" } ''' )) actual = self.client.video.v1.rooms(sid="RMXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \ .participants(sid="PAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \ .published_tracks(sid="MTXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch() self.assertIsNotNone(actual) def test_list_request(self): self.holodeck.mock(Response(500, '')) with self.assertRaises(TwilioException): self.client.video.v1.rooms(sid="RMXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \ .participants(sid="PAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \ .published_tracks.list() self.holodeck.assert_has_request(Request( 'get', 'https://video.twilio.com/v1/Rooms/RMXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Participants/PAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/PublishedTracks', )) def 
test_read_empty_response(self): self.holodeck.mock(Response( 200, ''' { "published_tracks": [], "meta": { "page": 0, "page_size": 50, "first_page_url": "https://video.twilio.com/v1/Rooms/RMaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Participants/PAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/PublishedTracks?PageSize=50&Page=0", "previous_page_url": null, "url": "https://video.twilio.com/v1/Rooms/RMaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Participants/PAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/PublishedTracks?PageSize=50&Page=0", "next_page_url": null, "key": "published_tracks" } } ''' )) actual = self.client.video.v1.rooms(sid="RMXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \ .participants(sid="PAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \ .published_tracks.list() self.assertIsNotNone(actual)
0.490236
0.402891
import os import argparse from copy import deepcopy from pipeline import PipelineObject, PipelineStage, PipelineEngine class SchemaTypeSlicer: xpath_ns = { "xs": "http://www.w3.org/2001/XMLSchema", "acrn": "https://projectacrn.org", } @classmethod def get_node(cls, element, xpath): return element.find(xpath, namespaces=cls.xpath_ns) @classmethod def get_nodes(cls, element, xpath): return element.findall(xpath, namespaces=cls.xpath_ns) def __init__(self, etree): self.etree = etree def get_type_definition(self, type_name): type_node = self.get_node(self.etree, f"//xs:complexType[@name='{type_name}']") if type_node is None: type_node = self.get_node(self.etree, f"//xs:simpleType[@name='{type_name}']") return type_node def slice_element_list(self, element_list_node, new_nodes): sliced = False for element_node in self.get_nodes(element_list_node, "xs:element"): if not self.is_element_needed(element_node): element_list_node.remove(element_node) sliced = True continue # For embedded complex type definition, also slice in place. If the sliced type contains no sub-element, # remove the element itself, too. element_type_node = self.get_node(element_node, "xs:complexType") if element_type_node is not None: new_sub_nodes = self.slice(element_type_node, in_place=True) if len(self.get_nodes(element_type_node, ".//xs:element")) > 0: new_nodes.extend(new_sub_nodes) else: element_list_node.remove(element_node) continue # For external type definition, create a copy to slice. If the sliced type contains no sub-element, remove # the element itself. 
element_type_name = element_node.get("type") if element_type_name: element_type_node = self.get_type_definition(element_type_name) if element_type_node is not None: sliced_type_name = self.get_name_of_slice(element_type_name) # If a sliced type already exists, do not duplicate the effort type_node = self.get_type_definition(sliced_type_name) if type_node is not None: element_node.set("type", sliced_type_name) sliced = True else: new_sub_nodes = self.slice(element_type_node) if len(new_sub_nodes) == 0: continue elif new_sub_nodes[-1].tag.endswith("simpleType") or len(self.get_nodes(new_sub_nodes[-1], ".//xs:element")) > 0: new_nodes.extend(new_sub_nodes) element_node.set("type", sliced_type_name) sliced = True else: element_list_node.remove(element_node) return sliced def slice_restriction(self, restriction_node): sliced = False for restriction in self.get_nodes(restriction_node, "xs:enumeration"): if not self.is_element_needed(restriction): restriction_node.remove(restriction) sliced = True return sliced def slice(self, type_node, in_place=False, force_copy=False): new_nodes = [] sliced = False if in_place: new_type_node = type_node else: new_type_node = deepcopy(type_node) type_name = type_node.get("name") if type_name != None: sliced_type_name = self.get_name_of_slice(type_name) new_type_node.set("name", sliced_type_name) element_list_node = self.get_node(new_type_node, "xs:all") if element_list_node is not None: sliced = self.slice_element_list(element_list_node, new_nodes) restriction_node = self.get_node(new_type_node, "xs:restriction") if restriction_node is not None: sliced = self.slice_restriction(restriction_node) if not in_place and (sliced or force_copy): new_nodes.append(new_type_node) return new_nodes def is_element_needed(self, element_node): return True def get_name_of_slice(self, name): return f"Sliced{name}" class SlicingSchemaByVMTypeStage(PipelineStage): uses = {"schema_etree"} provides = {"schema_etree"} class VMTypeSlicer(SchemaTypeSlicer): def 
is_element_needed(self, element_node): annot_node = self.get_node(element_node, "xs:annotation") if annot_node is None: return True applicable_vms = annot_node.get("{https://projectacrn.org}applicable-vms") return applicable_vms is None or applicable_vms.find(self.vm_type_indicator) >= 0 def get_name_of_slice(self, name): return f"{self.type_prefix}{name}" class PreLaunchedTypeSlicer(VMTypeSlicer): vm_type_indicator = "pre-launched" type_prefix = "PreLaunched" class ServiceVMTypeSlicer(VMTypeSlicer): vm_type_indicator = "service-vm" type_prefix = "Service" class PostLaunchedTypeSlicer(VMTypeSlicer): vm_type_indicator = "post-launched" type_prefix = "PostLaunched" def run(self, obj): schema_etree = obj.get("schema_etree") vm_type_name = "VMConfigType" vm_type_node = SchemaTypeSlicer.get_node(schema_etree, f"//xs:complexType[@name='{vm_type_name}']") slicers = [ self.PreLaunchedTypeSlicer(schema_etree), self.ServiceVMTypeSlicer(schema_etree), self.PostLaunchedTypeSlicer(schema_etree) ] for slicer in slicers: new_nodes = slicer.slice(vm_type_node, force_copy=True) for n in new_nodes: schema_etree.getroot().append(n) for node in SchemaTypeSlicer.get_nodes(schema_etree, "//xs:complexType[@name='ACRNConfigType']//xs:element[@name='vm']//xs:alternative"): test = node.get("test") if test.find("PRE_LAUNCHED_VM") >= 0: node.set("type", slicers[0].get_name_of_slice(vm_type_name)) elif test.find("SERVICE_VM") >= 0: node.set("type", slicers[1].get_name_of_slice(vm_type_name)) elif test.find("POST_LAUNCHED_VM") >= 0: node.set("type", slicers[2].get_name_of_slice(vm_type_name)) obj.set("schema_etree", schema_etree) class SlicingSchemaByViewStage(PipelineStage): uses = {"schema_etree"} provides = {"schema_etree"} class ViewSlicer(SchemaTypeSlicer): def is_element_needed(self, element_node): annot_node = self.get_node(element_node, "xs:annotation") if annot_node is None: return True views = annot_node.get("{https://projectacrn.org}views") return views is None or 
views.find(self.view_indicator) >= 0 def get_name_of_slice(self, name): if name.find("ConfigType") >= 0: return name.replace("ConfigType", f"{self.type_prefix}ConfigType") else: return f"{self.type_prefix}{name}" class BasicViewSlicer(ViewSlicer): view_indicator = "basic" type_prefix = "Basic" class AdvancedViewSlicer(ViewSlicer): view_indicator = "advanced" type_prefix = "Advanced" def run(self, obj): schema_etree = obj.get("schema_etree") type_nodes = list(filter(lambda x: x.get("name") and x.get("name").endswith("VMConfigType"), SchemaTypeSlicer.get_nodes(schema_etree, "//xs:complexType"))) type_nodes.append(SchemaTypeSlicer.get_node(schema_etree, "//xs:complexType[@name = 'HVConfigType']")) slicers = [ self.BasicViewSlicer(schema_etree), self.AdvancedViewSlicer(schema_etree), ] for slicer in slicers: for type_node in type_nodes: new_nodes = slicer.slice(type_node, force_copy=True) for n in new_nodes: schema_etree.getroot().append(n) obj.set("schema_etree", schema_etree) def main(args): from lxml_loader import LXMLLoadStage pipeline = PipelineEngine(["schema_path"]) pipeline.add_stages([ LXMLLoadStage("schema"), SlicingSchemaByVMTypeStage(), SlicingSchemaByViewStage(), ]) obj = PipelineObject(schema_path = args.schema) pipeline.run(obj) obj.get("schema_etree").write(args.out) print(f"Sliced schema written to {args.out}") if __name__ == "__main__": # abs __file__ path to ignore `__file__ == 'schema_slicer.py'` issue config_tools_dir = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), "..")) schema_dir = os.path.join(config_tools_dir, "schema") configurator_build_dir = os.path.join(config_tools_dir, 'configurator', 'build') if not os.path.isdir(configurator_build_dir): os.mkdir(configurator_build_dir) parser = argparse.ArgumentParser(description="Slice a given scenario schema by VM types and views") parser.add_argument("out", nargs="?", default=os.path.join(configurator_build_dir, "sliced.xsd"), help="Path where the output is placed") 
parser.add_argument("--schema", default=os.path.join(schema_dir, "config.xsd"), help="the XML schema that defines the syntax of scenario XMLs") args = parser.parse_args() main(args)
misc/config_tools/scenario_config/schema_slicer.py
import os import argparse from copy import deepcopy from pipeline import PipelineObject, PipelineStage, PipelineEngine class SchemaTypeSlicer: xpath_ns = { "xs": "http://www.w3.org/2001/XMLSchema", "acrn": "https://projectacrn.org", } @classmethod def get_node(cls, element, xpath): return element.find(xpath, namespaces=cls.xpath_ns) @classmethod def get_nodes(cls, element, xpath): return element.findall(xpath, namespaces=cls.xpath_ns) def __init__(self, etree): self.etree = etree def get_type_definition(self, type_name): type_node = self.get_node(self.etree, f"//xs:complexType[@name='{type_name}']") if type_node is None: type_node = self.get_node(self.etree, f"//xs:simpleType[@name='{type_name}']") return type_node def slice_element_list(self, element_list_node, new_nodes): sliced = False for element_node in self.get_nodes(element_list_node, "xs:element"): if not self.is_element_needed(element_node): element_list_node.remove(element_node) sliced = True continue # For embedded complex type definition, also slice in place. If the sliced type contains no sub-element, # remove the element itself, too. element_type_node = self.get_node(element_node, "xs:complexType") if element_type_node is not None: new_sub_nodes = self.slice(element_type_node, in_place=True) if len(self.get_nodes(element_type_node, ".//xs:element")) > 0: new_nodes.extend(new_sub_nodes) else: element_list_node.remove(element_node) continue # For external type definition, create a copy to slice. If the sliced type contains no sub-element, remove # the element itself. 
element_type_name = element_node.get("type") if element_type_name: element_type_node = self.get_type_definition(element_type_name) if element_type_node is not None: sliced_type_name = self.get_name_of_slice(element_type_name) # If a sliced type already exists, do not duplicate the effort type_node = self.get_type_definition(sliced_type_name) if type_node is not None: element_node.set("type", sliced_type_name) sliced = True else: new_sub_nodes = self.slice(element_type_node) if len(new_sub_nodes) == 0: continue elif new_sub_nodes[-1].tag.endswith("simpleType") or len(self.get_nodes(new_sub_nodes[-1], ".//xs:element")) > 0: new_nodes.extend(new_sub_nodes) element_node.set("type", sliced_type_name) sliced = True else: element_list_node.remove(element_node) return sliced def slice_restriction(self, restriction_node): sliced = False for restriction in self.get_nodes(restriction_node, "xs:enumeration"): if not self.is_element_needed(restriction): restriction_node.remove(restriction) sliced = True return sliced def slice(self, type_node, in_place=False, force_copy=False): new_nodes = [] sliced = False if in_place: new_type_node = type_node else: new_type_node = deepcopy(type_node) type_name = type_node.get("name") if type_name != None: sliced_type_name = self.get_name_of_slice(type_name) new_type_node.set("name", sliced_type_name) element_list_node = self.get_node(new_type_node, "xs:all") if element_list_node is not None: sliced = self.slice_element_list(element_list_node, new_nodes) restriction_node = self.get_node(new_type_node, "xs:restriction") if restriction_node is not None: sliced = self.slice_restriction(restriction_node) if not in_place and (sliced or force_copy): new_nodes.append(new_type_node) return new_nodes def is_element_needed(self, element_node): return True def get_name_of_slice(self, name): return f"Sliced{name}" class SlicingSchemaByVMTypeStage(PipelineStage): uses = {"schema_etree"} provides = {"schema_etree"} class VMTypeSlicer(SchemaTypeSlicer): def 
is_element_needed(self, element_node): annot_node = self.get_node(element_node, "xs:annotation") if annot_node is None: return True applicable_vms = annot_node.get("{https://projectacrn.org}applicable-vms") return applicable_vms is None or applicable_vms.find(self.vm_type_indicator) >= 0 def get_name_of_slice(self, name): return f"{self.type_prefix}{name}" class PreLaunchedTypeSlicer(VMTypeSlicer): vm_type_indicator = "pre-launched" type_prefix = "PreLaunched" class ServiceVMTypeSlicer(VMTypeSlicer): vm_type_indicator = "service-vm" type_prefix = "Service" class PostLaunchedTypeSlicer(VMTypeSlicer): vm_type_indicator = "post-launched" type_prefix = "PostLaunched" def run(self, obj): schema_etree = obj.get("schema_etree") vm_type_name = "VMConfigType" vm_type_node = SchemaTypeSlicer.get_node(schema_etree, f"//xs:complexType[@name='{vm_type_name}']") slicers = [ self.PreLaunchedTypeSlicer(schema_etree), self.ServiceVMTypeSlicer(schema_etree), self.PostLaunchedTypeSlicer(schema_etree) ] for slicer in slicers: new_nodes = slicer.slice(vm_type_node, force_copy=True) for n in new_nodes: schema_etree.getroot().append(n) for node in SchemaTypeSlicer.get_nodes(schema_etree, "//xs:complexType[@name='ACRNConfigType']//xs:element[@name='vm']//xs:alternative"): test = node.get("test") if test.find("PRE_LAUNCHED_VM") >= 0: node.set("type", slicers[0].get_name_of_slice(vm_type_name)) elif test.find("SERVICE_VM") >= 0: node.set("type", slicers[1].get_name_of_slice(vm_type_name)) elif test.find("POST_LAUNCHED_VM") >= 0: node.set("type", slicers[2].get_name_of_slice(vm_type_name)) obj.set("schema_etree", schema_etree) class SlicingSchemaByViewStage(PipelineStage): uses = {"schema_etree"} provides = {"schema_etree"} class ViewSlicer(SchemaTypeSlicer): def is_element_needed(self, element_node): annot_node = self.get_node(element_node, "xs:annotation") if annot_node is None: return True views = annot_node.get("{https://projectacrn.org}views") return views is None or 
views.find(self.view_indicator) >= 0 def get_name_of_slice(self, name): if name.find("ConfigType") >= 0: return name.replace("ConfigType", f"{self.type_prefix}ConfigType") else: return f"{self.type_prefix}{name}" class BasicViewSlicer(ViewSlicer): view_indicator = "basic" type_prefix = "Basic" class AdvancedViewSlicer(ViewSlicer): view_indicator = "advanced" type_prefix = "Advanced" def run(self, obj): schema_etree = obj.get("schema_etree") type_nodes = list(filter(lambda x: x.get("name") and x.get("name").endswith("VMConfigType"), SchemaTypeSlicer.get_nodes(schema_etree, "//xs:complexType"))) type_nodes.append(SchemaTypeSlicer.get_node(schema_etree, "//xs:complexType[@name = 'HVConfigType']")) slicers = [ self.BasicViewSlicer(schema_etree), self.AdvancedViewSlicer(schema_etree), ] for slicer in slicers: for type_node in type_nodes: new_nodes = slicer.slice(type_node, force_copy=True) for n in new_nodes: schema_etree.getroot().append(n) obj.set("schema_etree", schema_etree) def main(args): from lxml_loader import LXMLLoadStage pipeline = PipelineEngine(["schema_path"]) pipeline.add_stages([ LXMLLoadStage("schema"), SlicingSchemaByVMTypeStage(), SlicingSchemaByViewStage(), ]) obj = PipelineObject(schema_path = args.schema) pipeline.run(obj) obj.get("schema_etree").write(args.out) print(f"Sliced schema written to {args.out}") if __name__ == "__main__": # abs __file__ path to ignore `__file__ == 'schema_slicer.py'` issue config_tools_dir = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), "..")) schema_dir = os.path.join(config_tools_dir, "schema") configurator_build_dir = os.path.join(config_tools_dir, 'configurator', 'build') if not os.path.isdir(configurator_build_dir): os.mkdir(configurator_build_dir) parser = argparse.ArgumentParser(description="Slice a given scenario schema by VM types and views") parser.add_argument("out", nargs="?", default=os.path.join(configurator_build_dir, "sliced.xsd"), help="Path where the output is placed") 
parser.add_argument("--schema", default=os.path.join(schema_dir, "config.xsd"), help="the XML schema that defines the syntax of scenario XMLs") args = parser.parse_args() main(args)
0.613005
0.179028
import os import sys import time import json import os.path import hashlib import logging import threading from decimal import Decimal from flask_socketio import SocketIO from flask import Flask, render_template, url_for, request from binance_api import api_master_rest_caller from binance_api import api_master_socket_caller from . import trader MULTI_DEPTH_INDICATORS = ['ema', 'sma', 'rma', 'order'] # Initilize globals. ## Setup flask app/socket APP = Flask(__name__) SOCKET_IO = SocketIO(APP) ## Initilize base core object. core_object = None started_updater = False ## Initilize IP/port pair globals. host_ip = '' host_port = '' ## Set traders cache file name. CAHCE_FILES = 'traders.json' @APP.context_processor def override_url_for(): return(dict(url_for=dated_url_for)) def dated_url_for(endpoint, **values): # Override to prevent cached assets being used. if endpoint == 'static': filename = values.get('filename', None) if filename: file_path = os.path.join(APP.root_path, endpoint, filename) values['q'] = int(os.stat(file_path).st_mtime) return url_for(endpoint, **values) @APP.route('/', methods=['GET']) def control_panel(): # Base control panel configuration. global started_updater ## Web updater used for live updating. if not(started_updater): started_updater = True web_updater_thread = threading.Thread(target=web_updater) web_updater_thread.start() ## Set socket ip/port. start_up_data = { 'host':{'IP': host_ip, 'Port': host_port}, 'market_symbols': core_object.trading_markets } return(render_template('main_page.html', data=start_up_data)) @APP.route('/rest-api/v1/trader_update', methods=['POST']) def update_trader(): # Base API for managing trader interaction. data = request.get_json() ## Check if specified bot exists. current_trader = api_error_check(data) if current_trader == None: ## No trader therefore return false. return(json.dumps({'call':False, 'message':'INVALID_TRADER'})) elif data['action'] == 'start': ## Updating trader status to running. 
if current_trader.state_data['runtime_state'] == 'FORCE_PAUSE': current_trader.state_data['runtime_state'] = 'RUN' elif data['action'] == 'pause': ## Updating trader status to paused. if current_trader.state_data['runtime_state'] == 'RUN': current_trader.state_data['runtime_state'] = 'FORCE_PAUSE' else: ## If action was not found return false return(json.dumps({'call':False, 'message':'INVALID_ACTION'})) return(json.dumps({'call':True})) @APP.route('/rest-api/v1/get_trader_charting', methods=['GET']) def get_trader_charting(): # Endpoint to pass trader indicator data. market = request.args.get('market') limit = int(request.args.get('limit')) data = {'market':market} ## Check if specified bot exists. current_trader = api_error_check(data) if current_trader == None: ## No trader therefore return false. return(json.dumps({'call':False, 'message':'INVALID_TRADER'})) candle_data = core_object.get_trader_candles(current_trader.print_pair)[:limit] indicator_data = core_object.get_trader_indicators(current_trader.print_pair) short_indicator_data = shorten_indicators(indicator_data, candle_data[-1][0]) return(json.dumps({'call':True, 'data':{'market':market, 'indicators':short_indicator_data, 'candles':candle_data}})) @APP.route('/rest-api/v1/get_trader_indicators', methods=['GET']) def get_trader_indicators(): # Endpoint to pass trader indicator data. market = request.args.get('market') limit = int(request.args.get('limit')) data = {'market':market} ## Check if specified bot exists. current_trader = api_error_check(data) if current_trader == None: ## No trader therefore return false. return(json.dumps({'call':False, 'message':'INVALID_TRADER'})) indicator_data = core_object.get_trader_indicators(current_trader.print_pair) return(json.dumps({'call':True, 'data':{'market':market, 'indicators':indicator_data}})) @APP.route('/rest-api/v1/get_trader_candles', methods=['GET']) def get_trader_candles(): # Endpoint to pass trader candles. 
market = request.args.get('market') limit = int(request.args.get('limit')) data = {'market':market} ## Check if specified bot exists. current_trader = api_error_check(data) if current_trader == None: ## No trader therefore return false. return(json.dumps({'call':False, 'message':'INVALID_TRADER'})) candle_data = core_object.get_trader_candles(current_trader.print_pair)[:limit] return(json.dumps({'call':True, 'data':{'market':market, 'candles':candle_data}})) @APP.route('/rest-api/v1/test', methods=['GET']) def test_rest_call(): # API endpoint test return(json.dumps({'call':True, 'message':'HELLO WORLD!'})) def shorten_indicators(indicators, end_time): base_indicators = {} for ind in indicators: if ind in MULTI_DEPTH_INDICATORS: base_indicators.update({ind:{}}) for sub_ind in indicators[ind]: base_indicators[ind].update({sub_ind:[ [val[0] if ind != 'order' else val[0]*1000,val[1]] for val in indicators[ind][sub_ind] if (val[0] if ind != 'order' else val[0]*1000) > end_time ]}) else: base_indicators.update({ind:[ [val[0],val[1]] for val in indicators[ind] if val[0] > end_time]}) return(base_indicators) def api_error_check(data): ## Check if specified bot exists. current_trader = None for trader in core_object.trader_objects: if trader.print_pair == data['market']: current_trader = trader break return(current_trader) def web_updater(): # Web updater use to update live via socket. lastHash = None while True: if core_object.coreState == 'RUN': ## Get trader data and hash it to find out if there have been any changes. traderData = core_object.get_trader_data() currHash = hashlib.md5(str(traderData).encode()) if lastHash != currHash: ## Update any new changes via socket. 
lastHash = currHash total_bulk_data = [] for trader in traderData: bulk_data = {} bulk_data.update({'market':trader['market']}) bulk_data.update({'trade_recorder':trader['trade_recorder']}) bulk_data.update({'wallet_pair':trader['wallet_pair']}) bulk_data.update(trader['custom_conditions']) bulk_data.update(trader['market_activity']) bulk_data.update(trader['market_prices']) bulk_data.update(trader['state_data']) total_bulk_data.append(bulk_data) SOCKET_IO.emit('current_traders_data', {'data':total_bulk_data}) time.sleep(.8) class BotCore(): def __init__(self, settings, logs_dir, cache_dir): # Initilization for the bot core managment object. logging.info('[BotCore] Initilizing the BotCore object.') ## Setup binance REST and socket API. self.rest_api = api_master_rest_caller.Binance_REST(settings['public_key'], settings['private_key']) self.socket_api = api_master_socket_caller.Binance_SOCK() ## Setup the logs/cache dir locations. self.logs_dir = logs_dir self.cache_dir = cache_dir ## Setup run type, market type, and update bnb balance. self.run_type = settings['run_type'] self.market_type = settings['market_type'] self.update_bnb_balance = settings['update_bnb_balance'] ## Setup max candle/depth setting. self.max_candles = settings['max_candles'] self.max_depth = settings['max_depth'] ## Get base quote pair (This prevents multiple different pairs from conflicting.) pair_one = settings['trading_markets'][0] self.quote_asset = pair_one[:pair_one.index('-')] self.base_currency = settings['trading_currency'] self.candle_Interval = settings['trader_interval'] ## Initilize base trader settings. self.trader_objects = [] self.trading_markets = settings['trading_markets'] ## Initilize core state self.coreState = 'READY' def start(self): # Start the core object. 
logging.info('[BotCore] Starting the BotCore object.') self.coreState = 'SETUP' ## check markets found_markets = [] not_supported = [] #breakpoint() for market in self.rest_api.get_exchangeInfo()['symbols']: fmtMarket = '{0}-{1}'.format(market['quoteAsset'], market['baseAsset']) # If the current market is not in the trading markets list then skip. if not fmtMarket in self.trading_markets: continue found_markets.append(fmtMarket) if (self.market_type == 'MARGIN' and market['isMarginTradingAllowed'] == False) or (self.market_type == 'SPOT' and market['isSpotTradingAllowed'] == False): not_supported.append(fmtMarket) continue # This is used to setup min quantity. if float(market['filters'][2]['minQty']) < 1.0: minQuantBase = (Decimal(market['filters'][2]['minQty'])).as_tuple() lS = abs(int(len(minQuantBase.digits)+minQuantBase.exponent))+1 else: lS = 0 # This is used to set up the price precision for the market. tickSizeBase = (Decimal(market['filters'][0]['tickSize'])).as_tuple() tS = abs(int(len(tickSizeBase.digits)+tickSizeBase.exponent))+1 # This is used to get the markets minimal notation. mN = float(market['filters'][3]['minNotional']) # Put all rules into a json object to pass to the trader. market_rules = {'LOT_SIZE':lS, 'TICK_SIZE':tS, 'MINIMUM_NOTATION':mN} # Initilize trader objecta dn also set-up its inital required data. traderObject = trader.BaseTrader(market['quoteAsset'], market['baseAsset'], self.rest_api, socket_api=self.socket_api) traderObject.setup_initial_values(self.market_type, self.run_type, market_rules) self.trader_objects.append(traderObject) ## Show markets that dont exist on the binance exchange. if len(self.trading_markets) != len(found_markets): no_market_text = '' for market in [market for market in self.trading_markets if market not in found_markets]: no_market_text+=str(market)+', ' logging.warning('Following pairs dont exist: {}'.format(no_market_text[:-2])) ## Show markets that dont support the market type. 
if len(not_supported) > 0: not_support_text = '' for market in not_supported: not_support_text += ' '+str(market) logging.warning('[BotCore] Following market pairs are not supported for {}: {}'.format(self.market_type, not_support_text)) valid_tading_markets = [market for market in found_markets if market not in not_supported] ## setup the binance socket. for market in valid_tading_markets: self.socket_api.set_candle_stream(symbol=market, interval=self.candle_Interval) self.socket_api.set_manual_depth_stream(symbol=market, update_speed='1000ms') #breakpoint() if self.run_type == 'REAL': self.socket_api.set_userDataStream(self.rest_api, self.market_type) self.socket_api.BASE_CANDLE_LIMIT = self.max_candles self.socket_api.BASE_DEPTH_LIMIT = self.max_depth self.socket_api.build_query() self.socket_api.set_live_and_historic_combo(self.rest_api) self.socket_api.start() # Load the wallets. if self.run_type == 'REAL': user_info = self.rest_api.get_account(self.market_type) #iwan: todo: check if this request is successfull. #one case is request is ahead of time, and binance return error 'code':-1021 'msg':"Timestamp for this request was 1000ms ahead of the server's time. if self.market_type == 'SPOT': wallet_balances = user_info['balances'] elif self.market_type == 'MARGIN': wallet_balances = user_info['userAssets'] current_tokens = {} for balance in wallet_balances: total_balance = (float(balance['free']) + float(balance['locked'])) if total_balance > 0: current_tokens.update({balance['asset']:[ float(balance['free']), float(balance['locked'])]}) else: current_tokens = {self.quote_asset:[float(self.base_currency), 0.0]} # Load cached data cached_traders_data = None if os.path.exists(self.cache_dir+CAHCE_FILES): with open(self.cache_dir+CAHCE_FILES, 'r') as f: cached_traders_data = json.load(f)['data'] ## Setup the trader objects and start them. 
logging.info('[BotCore] Starting the trader objects.') #breakpoint() for trader_ in self.trader_objects: currSymbol = "{0}{1}".format(trader_.base_asset, trader_.quote_asset) # Update trader with cached data (to resume trades/keep records of trades.) if cached_traders_data != '' and cached_traders_data: for cached_trader in cached_traders_data: m_split = cached_trader['market'].split('-') if (m_split[1]+m_split[0]) == currSymbol: trader_.configuration = cached_trader['configuration'] trader_.custom_conditional_data = cached_trader['custom_conditions'] trader_.market_activity = cached_trader['market_activity'] trader_.trade_recorder = cached_trader['trade_recorder'] trader_.state_data = cached_trader['state_data'] wallet_pair = {} if trader_.quote_asset in current_tokens: wallet_pair.update({trader_.quote_asset:current_tokens[trader_.quote_asset]}) if trader_.base_asset in current_tokens: wallet_pair.update({trader_.base_asset:current_tokens[trader_.base_asset]}) trader_.start(self.base_currency, wallet_pair) logging.debug('[BotCore] Starting trader manager') TM_thread = threading.Thread(target=self._trader_manager) TM_thread.start() if self.update_bnb_balance: logging.debug('[BotCore] Starting BNB manager') BNB_thread = threading.Thread(target=self._bnb_manager) BNB_thread.start() logging.debug('[BotCore] Starting connection manager thread.') CM_thread = threading.Thread(target=self._connection_manager) CM_thread.start() logging.debug('[BotCore] Starting file manager thread.') FM_thread = threading.Thread(target=self._file_manager) FM_thread.start() logging.info('[BotCore] BotCore successfully started.') self.coreState = 'RUN' def _trader_manager(self): ''' ''' while self.coreState != 'STOP': pass def _bnb_manager(self): ''' This will manage BNB balance and update if there is low BNB in account. ''' last_wallet_update_time = 0 while self.coreState != 'STOP': socket_buffer_global = self.socket_api.socketBuffer # If outbound postion is seen then wallet has updated. 
if 'outboundAccountPosition' in socket_buffer_global: if last_wallet_update_time != socket_buffer_global['outboundAccountPosition']['E']: last_wallet_update_time = socket_buffer_global['outboundAccountPosition']['E'] for wallet in socket_buffer_global['outboundAccountPosition']['B']: if wallet['a'] == 'BNB': if float(wallet['f']) < 0.01: bnb_order = self.rest_api.place_order(self.market_type, symbol='BNBBTC', side='BUY', type='MARKET', quantity=0.1) time.sleep(2) def _file_manager(self): ''' This section is responsible for activly updating the traders cache files. ''' while self.coreState != 'STOP': time.sleep(15) traders_data = self.get_trader_data() if os.path.exists(self.cache_dir): file_path = '{0}{1}'.format(self.cache_dir,CAHCE_FILES) with open(file_path, 'w') as f: json.dump({'lastUpdateTime':time.time() ,'data':traders_data}, f) def _connection_manager(self): ''' This section is responsible for re-testing connectiongs in the event of a disconnect. ''' update_time = 0 retryCounter = 1 time.sleep(20) while self.coreState != 'STOP': time.sleep(1) if self.coreState != 'RUN': continue if self.socket_api.last_data_recv_time != update_time: update_time = self.socket_api.last_data_recv_time else: if (update_time + (15*retryCounter)) < time.time(): retryCounter += 1 try: print(self.rest_api.test_ping()) except Exception as e: logging.warning('[BotCore] Connection issue: {0}.'.format(e)) continue logging.info('[BotCore] Connection issue resolved.') if not(self.socket_api.socketRunning): logging.info('[BotCore] Attempting socket restart.') self.socket_api.start() def get_trader_data(self): ''' This can be called to return data for each of the active traders. ''' rData = [ _trader.get_trader_data() for _trader in self.trader_objects ] return(rData) def get_trader_indicators(self, market): ''' This can be called to return the indicators that are used by the traders (Will be used to display web UI activity.) 
''' for _trader in self.trader_objects: if _trader.print_pair == market: indicator_data = _trader.indicators indicator_data.update({'order':{'buy':[], 'sell':[]}}) indicator_data['order']['buy'] = [ [order[0],order[1]] for order in _trader.trade_recorder if order[4] == 'BUY'] indicator_data['order']['sell'] = [ [order[0],order[1]] for order in _trader.trade_recorder if order[4] == 'SELL'] return(indicator_data) def get_trader_candles(self, market): ''' This can be called to return the candle data for the traders (Will be used to display web UI activity.) ''' for _trader in self.trader_objects: if _trader.print_pair == market: sock_symbol = str(_trader.base_asset)+str(_trader.quote_asset) return(self.socket_api.get_live_candles(sock_symbol)) def start(settings, logs_dir, cache_dir): global core_object, host_ip, host_port if core_object == None: core_object = BotCore(settings, logs_dir, cache_dir) core_object.start() logging.info('[BotCore] Starting traders in {0} mode, market type is {1}.'.format(settings['run_type'], settings['market_type'])) log = logging.getLogger('werkzeug') log.setLevel(logging.ERROR) host_ip = settings['host_ip'] host_port = settings['host_port'] SOCKET_IO.run(APP, host=settings['host_ip'], port=settings['host_port'], debug=True, use_reloader=False)
core/botCore.py
"""Core bot management module: wires the Binance REST/socket APIs to trader
objects and exposes a Flask + SocketIO web control panel.

Module is driven via the module-level start() at the bottom, which builds a
singleton BotCore, starts its worker threads, then blocks in SOCKET_IO.run().
"""
import os
import sys
import time
import json
import os.path
import hashlib
import logging
import threading

from decimal import Decimal

from flask_socketio import SocketIO
from flask import Flask, render_template, url_for, request

from binance_api import api_master_rest_caller
from binance_api import api_master_socket_caller

from . import trader

# Indicator keys whose values are nested one level deeper ({sub_ind: [...]})
# rather than a flat list; shorten_indicators() treats these specially.
MULTI_DEPTH_INDICATORS = ['ema', 'sma', 'rma', 'order']

# Initialize globals.
## Set up flask app/socket.
APP = Flask(__name__)
SOCKET_IO = SocketIO(APP)

## Singleton BotCore instance (created lazily in start()).
core_object = None
started_updater = False

## IP/port pair globals, filled from settings in start().
host_ip = ''
host_port = ''

## Traders cache file name. NOTE(review): name contains a typo ("CAHCE"),
## preserved here because renaming would change the module interface.
CAHCE_FILES = 'traders.json'


@APP.context_processor
def override_url_for():
    # Expose the cache-busting url_for variant to all templates.
    return(dict(url_for=dated_url_for))


def dated_url_for(endpoint, **values):
    # Override to prevent cached assets being used: append the static file's
    # mtime as a query parameter so browsers refetch after a deploy.
    if endpoint == 'static':
        filename = values.get('filename', None)
        if filename:
            file_path = os.path.join(APP.root_path, endpoint, filename)
            values['q'] = int(os.stat(file_path).st_mtime)
    return url_for(endpoint, **values)


@APP.route('/', methods=['GET'])
def control_panel():
    # Base control panel configuration.
    global started_updater
    ## Start the background web updater exactly once, on first page load.
    if not(started_updater):
        started_updater = True
        web_updater_thread = threading.Thread(target=web_updater)
        web_updater_thread.start()
    ## Pass socket ip/port and the configured markets to the template.
    start_up_data = {
        'host':{'IP': host_ip, 'Port': host_port},
        'market_symbols': core_object.trading_markets
    }
    return(render_template('main_page.html', data=start_up_data))


@APP.route('/rest-api/v1/trader_update', methods=['POST'])
def update_trader():
    # Base API for managing trader interaction (start/pause a trader).
    data = request.get_json()
    ## Check if specified bot exists.
    current_trader = api_error_check(data)
    if current_trader == None:
        ## No trader therefore return false.
        return(json.dumps({'call':False, 'message':'INVALID_TRADER'}))
    elif data['action'] == 'start':
        ## Updating trader status to running (only from a forced pause).
        if current_trader.state_data['runtime_state'] == 'FORCE_PAUSE':
            current_trader.state_data['runtime_state'] = 'RUN'
    elif data['action'] == 'pause':
        ## Updating trader status to paused (only from running).
        if current_trader.state_data['runtime_state'] == 'RUN':
            current_trader.state_data['runtime_state'] = 'FORCE_PAUSE'
    else:
        ## If action was not found return false.
        return(json.dumps({'call':False, 'message':'INVALID_ACTION'}))
    return(json.dumps({'call':True}))


@APP.route('/rest-api/v1/get_trader_charting', methods=['GET'])
def get_trader_charting():
    # Endpoint to pass trader indicator data together with candles.
    market = request.args.get('market')
    limit = int(request.args.get('limit'))
    data = {'market':market}
    ## Check if specified bot exists.
    current_trader = api_error_check(data)
    if current_trader == None:
        ## No trader therefore return false.
        return(json.dumps({'call':False, 'message':'INVALID_TRADER'}))
    candle_data = core_object.get_trader_candles(current_trader.print_pair)[:limit]
    indicator_data = core_object.get_trader_indicators(current_trader.print_pair)
    # Trim indicators to the visible candle window (oldest shown candle).
    short_indicator_data = shorten_indicators(indicator_data, candle_data[-1][0])
    return(json.dumps({'call':True, 'data':{'market':market, 'indicators':short_indicator_data, 'candles':candle_data}}))


@APP.route('/rest-api/v1/get_trader_indicators', methods=['GET'])
def get_trader_indicators():
    # Endpoint to pass trader indicator data.
    market = request.args.get('market')
    # NOTE(review): 'limit' is parsed but never used in this endpoint.
    limit = int(request.args.get('limit'))
    data = {'market':market}
    ## Check if specified bot exists.
    current_trader = api_error_check(data)
    if current_trader == None:
        ## No trader therefore return false.
        return(json.dumps({'call':False, 'message':'INVALID_TRADER'}))
    indicator_data = core_object.get_trader_indicators(current_trader.print_pair)
    return(json.dumps({'call':True, 'data':{'market':market, 'indicators':indicator_data}}))


@APP.route('/rest-api/v1/get_trader_candles', methods=['GET'])
def get_trader_candles():
    # Endpoint to pass trader candles.
    market = request.args.get('market')
    limit = int(request.args.get('limit'))
    data = {'market':market}
    ## Check if specified bot exists.
    current_trader = api_error_check(data)
    if current_trader == None:
        ## No trader therefore return false.
        return(json.dumps({'call':False, 'message':'INVALID_TRADER'}))
    candle_data = core_object.get_trader_candles(current_trader.print_pair)[:limit]
    return(json.dumps({'call':True, 'data':{'market':market, 'candles':candle_data}}))


@APP.route('/rest-api/v1/test', methods=['GET'])
def test_rest_call():
    # API endpoint test.
    return(json.dumps({'call':True, 'message':'HELLO WORLD!'}))


def shorten_indicators(indicators, end_time):
    """Filter indicator points to those newer than end_time.

    'order' timestamps are stored in seconds and scaled by 1000 here to match
    the millisecond candle timestamps; other multi-depth indicators are
    filtered as-is.
    """
    base_indicators = {}
    for ind in indicators:
        if ind in MULTI_DEPTH_INDICATORS:
            base_indicators.update({ind:{}})
            for sub_ind in indicators[ind]:
                base_indicators[ind].update({sub_ind:[ [val[0] if ind != 'order' else val[0]*1000,val[1]] for val in indicators[ind][sub_ind] if (val[0] if ind != 'order' else val[0]*1000) > end_time ]})
        else:
            base_indicators.update({ind:[ [val[0],val[1]] for val in indicators[ind] if val[0] > end_time]})
    return(base_indicators)


def api_error_check(data):
    ## Look up the trader matching data['market']; None when absent.
    current_trader = None
    for trader in core_object.trader_objects:
        if trader.print_pair == data['market']:
            current_trader = trader
            break
    return(current_trader)


def web_updater():
    # Background loop: push trader data to web clients via socket on change.
    lastHash = None
    while True:
        if core_object.coreState == 'RUN':
            ## Get trader data and hash it to find out if there have been any changes.
            traderData = core_object.get_trader_data()
            currHash = hashlib.md5(str(traderData).encode())
            # NOTE(review): currHash is an md5 *object*; object comparison is
            # always unequal, so this emits every cycle — compare
            # .hexdigest() values to get the intended change detection.
            if lastHash != currHash:
                ## Update any new changes via socket.
                lastHash = currHash
                total_bulk_data = []
                for trader in traderData:
                    bulk_data = {}
                    bulk_data.update({'market':trader['market']})
                    bulk_data.update({'trade_recorder':trader['trade_recorder']})
                    bulk_data.update({'wallet_pair':trader['wallet_pair']})
                    bulk_data.update(trader['custom_conditions'])
                    bulk_data.update(trader['market_activity'])
                    bulk_data.update(trader['market_prices'])
                    bulk_data.update(trader['state_data'])
                    total_bulk_data.append(bulk_data)
                SOCKET_IO.emit('current_traders_data', {'data':total_bulk_data})
        time.sleep(.8)


class BotCore():

    def __init__(self, settings, logs_dir, cache_dir):
        """Initialize the bot core management object from a settings dict.

        settings keys read here: public_key, private_key, run_type,
        market_type, update_bnb_balance, max_candles, max_depth,
        trading_markets, trading_currency, trader_interval.
        """
        logging.info('[BotCore] Initilizing the BotCore object.')

        ## Set up binance REST and socket API.
        self.rest_api = api_master_rest_caller.Binance_REST(settings['public_key'], settings['private_key'])
        self.socket_api = api_master_socket_caller.Binance_SOCK()

        ## Logs/cache dir locations.
        self.logs_dir = logs_dir
        self.cache_dir = cache_dir

        ## Run type, market type, and whether to auto top-up BNB balance.
        self.run_type = settings['run_type']
        self.market_type = settings['market_type']
        self.update_bnb_balance = settings['update_bnb_balance']

        ## Max candle/depth settings for the socket caches.
        self.max_candles = settings['max_candles']
        self.max_depth = settings['max_depth']

        ## Get base quote pair from the first market, e.g. 'BTC-ETH' -> 'BTC'.
        ## (This prevents multiple different pairs from conflicting.)
        pair_one = settings['trading_markets'][0]
        self.quote_asset = pair_one[:pair_one.index('-')]
        self.base_currency = settings['trading_currency']
        self.candle_Interval = settings['trader_interval']

        ## Base trader settings.
        self.trader_objects = []
        self.trading_markets = settings['trading_markets']

        ## Core state machine: READY -> SETUP -> RUN (STOP ends workers).
        self.coreState = 'READY'

    def start(self):
        # Start the core object: validate markets, build traders, open
        # sockets, load wallets/cache, then spawn the manager threads.
        logging.info('[BotCore] Starting the BotCore object.')
        self.coreState = 'SETUP'

        ## Check markets against the exchange info.
        found_markets = []
        not_supported = []
        #breakpoint()
        for market in self.rest_api.get_exchangeInfo()['symbols']:
            fmtMarket = '{0}-{1}'.format(market['quoteAsset'], market['baseAsset'])

            # If the current market is not in the trading markets list then skip.
            if not fmtMarket in self.trading_markets:
                continue
            found_markets.append(fmtMarket)

            if (self.market_type == 'MARGIN' and market['isMarginTradingAllowed'] == False) or (self.market_type == 'SPOT' and market['isSpotTradingAllowed'] == False):
                not_supported.append(fmtMarket)
                continue

            # Lot-size precision: number of decimal places of LOT_SIZE minQty.
            # NOTE(review): assumes filters[2] is LOT_SIZE — confirm against
            # the exchangeInfo filter order for this API version.
            if float(market['filters'][2]['minQty']) < 1.0:
                minQuantBase = (Decimal(market['filters'][2]['minQty'])).as_tuple()
                lS = abs(int(len(minQuantBase.digits)+minQuantBase.exponent))+1
            else:
                lS = 0

            # Price precision from the PRICE_FILTER tick size.
            tickSizeBase = (Decimal(market['filters'][0]['tickSize'])).as_tuple()
            tS = abs(int(len(tickSizeBase.digits)+tickSizeBase.exponent))+1

            # The market's minimal notional order value.
            mN = float(market['filters'][3]['minNotional'])

            # Bundle all rules into one object to pass to the trader.
            market_rules = {'LOT_SIZE':lS, 'TICK_SIZE':tS, 'MINIMUM_NOTATION':mN}

            # Initialize trader object and set up its initial required data.
            traderObject = trader.BaseTrader(market['quoteAsset'], market['baseAsset'], self.rest_api, socket_api=self.socket_api)
            traderObject.setup_initial_values(self.market_type, self.run_type, market_rules)
            self.trader_objects.append(traderObject)

        ## Warn about markets that don't exist on the binance exchange.
        if len(self.trading_markets) != len(found_markets):
            no_market_text = ''
            for market in [market for market in self.trading_markets if market not in found_markets]:
                no_market_text+=str(market)+', '
            logging.warning('Following pairs dont exist: {}'.format(no_market_text[:-2]))

        ## Warn about markets that don't support the configured market type.
        if len(not_supported) > 0:
            not_support_text = ''
            for market in not_supported:
                not_support_text += ' '+str(market)
            logging.warning('[BotCore] Following market pairs are not supported for {}: {}'.format(self.market_type, not_support_text))

        valid_tading_markets = [market for market in found_markets if market not in not_supported]

        ## Set up the binance socket streams for every valid market.
        for market in valid_tading_markets:
            self.socket_api.set_candle_stream(symbol=market, interval=self.candle_Interval)
            self.socket_api.set_manual_depth_stream(symbol=market, update_speed='1000ms')
        #breakpoint()
        if self.run_type == 'REAL':
            self.socket_api.set_userDataStream(self.rest_api, self.market_type)
        self.socket_api.BASE_CANDLE_LIMIT = self.max_candles
        self.socket_api.BASE_DEPTH_LIMIT = self.max_depth
        self.socket_api.build_query()
        self.socket_api.set_live_and_historic_combo(self.rest_api)
        self.socket_api.start()

        # Load the wallets (live balances for REAL, a synthetic one for test).
        if self.run_type == 'REAL':
            user_info = self.rest_api.get_account(self.market_type)
            #iwan: todo: check if this request is successfull.
            #one case is request is ahead of time, and binance return error 'code':-1021 'msg':"Timestamp for this request was 1000ms ahead of the server's time.
            if self.market_type == 'SPOT':
                wallet_balances = user_info['balances']
            elif self.market_type == 'MARGIN':
                wallet_balances = user_info['userAssets']
            current_tokens = {}
            for balance in wallet_balances:
                total_balance = (float(balance['free']) + float(balance['locked']))
                if total_balance > 0:
                    current_tokens.update({balance['asset']:[ float(balance['free']), float(balance['locked'])]})
        else:
            current_tokens = {self.quote_asset:[float(self.base_currency), 0.0]}

        # Load cached trader data (to resume trades across restarts).
        cached_traders_data = None
        if os.path.exists(self.cache_dir+CAHCE_FILES):
            with open(self.cache_dir+CAHCE_FILES, 'r') as f:
                cached_traders_data = json.load(f)['data']

        ## Set up the trader objects and start them.
        logging.info('[BotCore] Starting the trader objects.')
        #breakpoint()
        for trader_ in self.trader_objects:
            currSymbol = "{0}{1}".format(trader_.base_asset, trader_.quote_asset)

            # Update trader with cached data (to resume trades/keep records of trades.)
            if cached_traders_data != '' and cached_traders_data:
                for cached_trader in cached_traders_data:
                    m_split = cached_trader['market'].split('-')
                    if (m_split[1]+m_split[0]) == currSymbol:
                        trader_.configuration = cached_trader['configuration']
                        trader_.custom_conditional_data = cached_trader['custom_conditions']
                        trader_.market_activity = cached_trader['market_activity']
                        trader_.trade_recorder = cached_trader['trade_recorder']
                        trader_.state_data = cached_trader['state_data']

            # Give the trader only the two wallets it needs.
            wallet_pair = {}
            if trader_.quote_asset in current_tokens:
                wallet_pair.update({trader_.quote_asset:current_tokens[trader_.quote_asset]})
            if trader_.base_asset in current_tokens:
                wallet_pair.update({trader_.base_asset:current_tokens[trader_.base_asset]})

            trader_.start(self.base_currency, wallet_pair)

        logging.debug('[BotCore] Starting trader manager')
        TM_thread = threading.Thread(target=self._trader_manager)
        TM_thread.start()

        if self.update_bnb_balance:
            logging.debug('[BotCore] Starting BNB manager')
            BNB_thread = threading.Thread(target=self._bnb_manager)
            BNB_thread.start()

        logging.debug('[BotCore] Starting connection manager thread.')
        CM_thread = threading.Thread(target=self._connection_manager)
        CM_thread.start()

        logging.debug('[BotCore] Starting file manager thread.')
        FM_thread = threading.Thread(target=self._file_manager)
        FM_thread.start()

        logging.info('[BotCore] BotCore successfully started.')
        self.coreState = 'RUN'

    def _trader_manager(self):
        ''' Placeholder manager thread.
        NOTE(review): this busy-spins until STOP — add a sleep or real work. '''
        while self.coreState != 'STOP':
            pass

    def _bnb_manager(self):
        ''' This will manage BNB balance and update if there is low BNB in account. '''
        last_wallet_update_time = 0
        while self.coreState != 'STOP':
            socket_buffer_global = self.socket_api.socketBuffer

            # If outbound position is seen then the wallet has updated; 'E' is
            # the event time, 'B' the balances list (Binance user-stream keys).
            if 'outboundAccountPosition' in socket_buffer_global:
                if last_wallet_update_time != socket_buffer_global['outboundAccountPosition']['E']:
                    last_wallet_update_time = socket_buffer_global['outboundAccountPosition']['E']
                    for wallet in socket_buffer_global['outboundAccountPosition']['B']:
                        if wallet['a'] == 'BNB':
                            # Top up BNB (used for fees) when free balance is low.
                            if float(wallet['f']) < 0.01:
                                bnb_order = self.rest_api.place_order(self.market_type, symbol='BNBBTC', side='BUY', type='MARKET', quantity=0.1)
            time.sleep(2)

    def _file_manager(self):
        ''' This section is responsible for actively updating the traders cache files. '''
        while self.coreState != 'STOP':
            time.sleep(15)
            traders_data = self.get_trader_data()
            if os.path.exists(self.cache_dir):
                file_path = '{0}{1}'.format(self.cache_dir,CAHCE_FILES)
                with open(file_path, 'w') as f:
                    json.dump({'lastUpdateTime':time.time() ,'data':traders_data}, f)

    def _connection_manager(self):
        ''' This section is responsible for re-testing connections in the event of a disconnect. '''
        update_time = 0
        retryCounter = 1
        time.sleep(20)
        while self.coreState != 'STOP':
            time.sleep(1)
            if self.coreState != 'RUN':
                continue
            if self.socket_api.last_data_recv_time != update_time:
                # Fresh data seen; remember the new timestamp.
                update_time = self.socket_api.last_data_recv_time
            else:
                # No new data: after a growing back-off window, ping the REST
                # API and restart the socket if it is down.
                if (update_time + (15*retryCounter)) < time.time():
                    retryCounter += 1
                    try:
                        print(self.rest_api.test_ping())
                    except Exception as e:
                        logging.warning('[BotCore] Connection issue: {0}.'.format(e))
                        continue
                    logging.info('[BotCore] Connection issue resolved.')
                    if not(self.socket_api.socketRunning):
                        logging.info('[BotCore] Attempting socket restart.')
                        self.socket_api.start()

    def get_trader_data(self):
        ''' This can be called to return data for each of the active traders. '''
        rData = [ _trader.get_trader_data() for _trader in self.trader_objects ]
        return(rData)

    def get_trader_indicators(self, market):
        ''' Return the indicators used by the trader for `market`, with the
        recorded buy/sell orders merged in under the 'order' key.
        (Used to display web UI activity.) Returns None for unknown markets. '''
        for _trader in self.trader_objects:
            if _trader.print_pair == market:
                indicator_data = _trader.indicators
                indicator_data.update({'order':{'buy':[], 'sell':[]}})
                indicator_data['order']['buy'] = [ [order[0],order[1]] for order in _trader.trade_recorder if order[4] == 'BUY']
                indicator_data['order']['sell'] = [ [order[0],order[1]] for order in _trader.trade_recorder if order[4] == 'SELL']
                return(indicator_data)

    def get_trader_candles(self, market):
        ''' Return the live candle data for the trader matching `market`.
        (Used to display web UI activity.) Returns None for unknown markets. '''
        for _trader in self.trader_objects:
            if _trader.print_pair == market:
                sock_symbol = str(_trader.base_asset)+str(_trader.quote_asset)
                return(self.socket_api.get_live_candles(sock_symbol))


def start(settings, logs_dir, cache_dir):
    # Module entry point: build the BotCore singleton, then block serving the
    # web UI. NOTE(review): debug=True on a production server is unsafe.
    global core_object, host_ip, host_port

    if core_object == None:
        core_object = BotCore(settings, logs_dir, cache_dir)
        core_object.start()
        logging.info('[BotCore] Starting traders in {0} mode, market type is {1}.'.format(settings['run_type'], settings['market_type']))

    # Quiet the werkzeug request log.
    log = logging.getLogger('werkzeug')
    log.setLevel(logging.ERROR)

    host_ip = settings['host_ip']
    host_port = settings['host_port']

    SOCKET_IO.run(APP, host=settings['host_ip'], port=settings['host_port'], debug=True, use_reloader=False)
0.267026
0.081119
"""Tests for dagster's run- and step-level retry behavior.

Covers: re-execution of a failed run, RetryRequested step retries, retry
limits, deferred retries, and fixed-wait retry delays.
"""
import os
import tempfile
import time
from collections import defaultdict

import pytest

from dagster import (
    DagsterEventType,
    Output,
    OutputDefinition,
    PipelineRun,
    RetryRequested,
    execute_pipeline,
    execute_pipeline_iterator,
    lambda_solid,
    pipeline,
    reconstructable,
    reexecute_pipeline,
    solid,
)
from dagster.core.execution.api import create_execution_plan, execute_plan
from dagster.core.execution.retries import Retries, RetryMode
from dagster.core.test_utils import instance_for_test

# Run every test against both in-process and multiprocess executors.
executors = pytest.mark.parametrize(
    "environment",
    [
        {"intermediate_storage": {"filesystem": {}}},
        {"intermediate_storage": {"filesystem": {}}, "execution": {"multiprocess": {}}},
    ],
)


def define_run_retry_pipeline():
    # Pipeline with a configurably-failing solid plus a skipped branch, used
    # to exercise run-level re-execution.
    @solid(config_schema={"fail": bool})
    def can_fail(context, _start_fail):
        if context.solid_config["fail"]:
            raise Exception("blah")
        return "okay perfect"

    @solid(
        output_defs=[
            OutputDefinition(bool, "start_fail", is_required=False),
            OutputDefinition(bool, "start_skip", is_required=False),
        ]
    )
    def two_outputs(_):
        yield Output(True, "start_fail")
        # won't yield start_skip

    @solid
    def will_be_skipped(_, _start_skip):
        pass  # doesn't matter

    @solid
    def downstream_of_failed(_, input_str):
        return input_str

    @pipeline
    def pipe():
        start_fail, start_skip = two_outputs()
        downstream_of_failed(can_fail(start_fail))
        will_be_skipped(will_be_skipped(start_skip))

    return pipe


@executors
def test_retries(environment):
    # A failed run re-executed with passing config should succeed, and the
    # never-yielded branch should stay skipped both times.
    with instance_for_test() as instance:
        pipe = reconstructable(define_run_retry_pipeline)
        fails = dict(environment)
        fails["solids"] = {"can_fail": {"config": {"fail": True}}}
        result = execute_pipeline(pipe, run_config=fails, instance=instance, raise_on_error=False,)
        assert not result.success

        passes = dict(environment)
        passes["solids"] = {"can_fail": {"config": {"fail": False}}}
        second_result = reexecute_pipeline(
            pipe, parent_run_id=result.run_id, run_config=passes, instance=instance,
        )
        assert second_result.success
        downstream_of_failed = second_result.result_for_solid("downstream_of_failed").output_value()
        assert downstream_of_failed == "okay perfect"

        will_be_skipped = [
            e for e in second_result.event_list if "will_be_skipped" in str(e.solid_handle)
        ]
        assert str(will_be_skipped[0].event_type_value) == "STEP_SKIPPED"
        assert str(will_be_skipped[1].event_type_value) == "STEP_SKIPPED"


def define_step_retry_pipeline():
    # Solid fails on its first run and succeeds on retry, using a marker file
    # in the configured directory to remember prior attempts.
    @solid(config_schema=str)
    def fail_first_time(context):
        file = os.path.join(context.solid_config, "i_threw_up")
        if os.path.exists(file):
            return "okay perfect"
        else:
            open(file, "a").close()
            raise RetryRequested()

    @pipeline
    def step_retry():
        fail_first_time()

    return step_retry


@executors
def test_step_retry(environment):
    # One failure then success: expect exactly one of each retry lifecycle event.
    with instance_for_test() as instance:
        with tempfile.TemporaryDirectory() as tempdir:
            env = dict(environment)
            env["solids"] = {"fail_first_time": {"config": tempdir}}
            result = execute_pipeline(
                reconstructable(define_step_retry_pipeline), run_config=env, instance=instance,
            )
        assert result.success
        events = defaultdict(list)
        for ev in result.event_list:
            events[ev.event_type].append(ev)

        assert len(events[DagsterEventType.STEP_START]) == 1
        assert len(events[DagsterEventType.STEP_UP_FOR_RETRY]) == 1
        assert len(events[DagsterEventType.STEP_RESTARTED]) == 1
        assert len(events[DagsterEventType.STEP_SUCCESS]) == 1


def define_retry_limit_pipeline():
    # Two always-failing solids: one with the default retry limit, one with 3.
    @lambda_solid
    def default_max():
        raise RetryRequested()

    @lambda_solid
    def three_max():
        raise RetryRequested(max_retries=3)

    @pipeline
    def retry_limits():
        default_max()
        three_max()

    return retry_limits


@executors
def test_step_retry_limit(environment):
    # Retry counts should honor the per-solid max_retries settings.
    with instance_for_test() as instance:
        result = execute_pipeline(
            reconstructable(define_retry_limit_pipeline),
            run_config=environment,
            raise_on_error=False,
            instance=instance,
        )
        assert not result.success

        events = defaultdict(list)
        for ev in result.events_by_step_key["default_max"]:
            events[ev.event_type].append(ev)

        assert len(events[DagsterEventType.STEP_START]) == 1
        assert len(events[DagsterEventType.STEP_UP_FOR_RETRY]) == 1
        assert len(events[DagsterEventType.STEP_RESTARTED]) == 1
        assert len(events[DagsterEventType.STEP_FAILURE]) == 1

        events = defaultdict(list)
        for ev in result.events_by_step_key["three_max"]:
            events[ev.event_type].append(ev)

        assert len(events[DagsterEventType.STEP_START]) == 1
        assert len(events[DagsterEventType.STEP_UP_FOR_RETRY]) == 3
        assert len(events[DagsterEventType.STEP_RESTARTED]) == 3
        assert len(events[DagsterEventType.STEP_FAILURE]) == 1


def test_retry_deferral():
    # In DEFERRED mode steps go up-for-retry but are never restarted.
    with instance_for_test() as instance:
        events = execute_plan(
            create_execution_plan(define_retry_limit_pipeline()),
            pipeline_run=PipelineRun(pipeline_name="retry_limits", run_id="42"),
            retries=Retries(RetryMode.DEFERRED),
            instance=instance,
        )
        events_by_type = defaultdict(list)
        for ev in events:
            events_by_type[ev.event_type].append(ev)

        assert len(events_by_type[DagsterEventType.STEP_START]) == 2
        assert len(events_by_type[DagsterEventType.STEP_UP_FOR_RETRY]) == 2
        assert DagsterEventType.STEP_RESTARTED not in events
        assert DagsterEventType.STEP_SUCCESS not in events


# Seconds each retry should wait in the fixed-wait test below.
DELAY = 2


def define_retry_wait_fixed_pipeline():
    # Same fail-once pattern as above, but requesting a fixed wait before retry.
    @solid(config_schema=str)
    def fail_first_and_wait(context):
        file = os.path.join(context.solid_config, "i_threw_up")
        if os.path.exists(file):
            return "okay perfect"
        else:
            open(file, "a").close()
            raise RetryRequested(seconds_to_wait=DELAY)

    @pipeline
    def step_retry():
        fail_first_and_wait()

    return step_retry


@executors
def test_step_retry_fixed_wait(environment):
    # The gap between up-for-retry and restarted must be at least DELAY seconds.
    with instance_for_test() as instance:
        with tempfile.TemporaryDirectory() as tempdir:
            env = dict(environment)
            env["solids"] = {"fail_first_and_wait": {"config": tempdir}}
            event_iter = execute_pipeline_iterator(
                reconstructable(define_retry_wait_fixed_pipeline), run_config=env, instance=instance,
            )
            start_wait = None
            end_wait = None
            success = None
            for event in event_iter:
                if event.is_step_up_for_retry:
                    start_wait = time.time()
                if event.is_step_restarted:
                    end_wait = time.time()
                if event.is_pipeline_success:
                    success = True

            assert success
            assert start_wait is not None
            assert end_wait is not None
            delay = end_wait - start_wait
            assert delay > DELAY
python_modules/dagster/dagster_tests/core_tests/execution_tests/test_retries.py
"""Tests for dagster's run- and step-level retry behavior.

Covers: re-execution of a failed run, RetryRequested step retries, retry
limits, deferred retries, and fixed-wait retry delays.
"""
import os
import tempfile
import time
from collections import defaultdict

import pytest

from dagster import (
    DagsterEventType,
    Output,
    OutputDefinition,
    PipelineRun,
    RetryRequested,
    execute_pipeline,
    execute_pipeline_iterator,
    lambda_solid,
    pipeline,
    reconstructable,
    reexecute_pipeline,
    solid,
)
from dagster.core.execution.api import create_execution_plan, execute_plan
from dagster.core.execution.retries import Retries, RetryMode
from dagster.core.test_utils import instance_for_test

# Run every test against both in-process and multiprocess executors.
executors = pytest.mark.parametrize(
    "environment",
    [
        {"intermediate_storage": {"filesystem": {}}},
        {"intermediate_storage": {"filesystem": {}}, "execution": {"multiprocess": {}}},
    ],
)


def define_run_retry_pipeline():
    # Pipeline with a configurably-failing solid plus a skipped branch, used
    # to exercise run-level re-execution.
    @solid(config_schema={"fail": bool})
    def can_fail(context, _start_fail):
        if context.solid_config["fail"]:
            raise Exception("blah")
        return "okay perfect"

    @solid(
        output_defs=[
            OutputDefinition(bool, "start_fail", is_required=False),
            OutputDefinition(bool, "start_skip", is_required=False),
        ]
    )
    def two_outputs(_):
        yield Output(True, "start_fail")
        # won't yield start_skip

    @solid
    def will_be_skipped(_, _start_skip):
        pass  # doesn't matter

    @solid
    def downstream_of_failed(_, input_str):
        return input_str

    @pipeline
    def pipe():
        start_fail, start_skip = two_outputs()
        downstream_of_failed(can_fail(start_fail))
        will_be_skipped(will_be_skipped(start_skip))

    return pipe


@executors
def test_retries(environment):
    # A failed run re-executed with passing config should succeed, and the
    # never-yielded branch should stay skipped both times.
    with instance_for_test() as instance:
        pipe = reconstructable(define_run_retry_pipeline)
        fails = dict(environment)
        fails["solids"] = {"can_fail": {"config": {"fail": True}}}
        result = execute_pipeline(pipe, run_config=fails, instance=instance, raise_on_error=False,)
        assert not result.success

        passes = dict(environment)
        passes["solids"] = {"can_fail": {"config": {"fail": False}}}
        second_result = reexecute_pipeline(
            pipe, parent_run_id=result.run_id, run_config=passes, instance=instance,
        )
        assert second_result.success
        downstream_of_failed = second_result.result_for_solid("downstream_of_failed").output_value()
        assert downstream_of_failed == "okay perfect"

        will_be_skipped = [
            e for e in second_result.event_list if "will_be_skipped" in str(e.solid_handle)
        ]
        assert str(will_be_skipped[0].event_type_value) == "STEP_SKIPPED"
        assert str(will_be_skipped[1].event_type_value) == "STEP_SKIPPED"


def define_step_retry_pipeline():
    # Solid fails on its first run and succeeds on retry, using a marker file
    # in the configured directory to remember prior attempts.
    @solid(config_schema=str)
    def fail_first_time(context):
        file = os.path.join(context.solid_config, "i_threw_up")
        if os.path.exists(file):
            return "okay perfect"
        else:
            open(file, "a").close()
            raise RetryRequested()

    @pipeline
    def step_retry():
        fail_first_time()

    return step_retry


@executors
def test_step_retry(environment):
    # One failure then success: expect exactly one of each retry lifecycle event.
    with instance_for_test() as instance:
        with tempfile.TemporaryDirectory() as tempdir:
            env = dict(environment)
            env["solids"] = {"fail_first_time": {"config": tempdir}}
            result = execute_pipeline(
                reconstructable(define_step_retry_pipeline), run_config=env, instance=instance,
            )
        assert result.success
        events = defaultdict(list)
        for ev in result.event_list:
            events[ev.event_type].append(ev)

        assert len(events[DagsterEventType.STEP_START]) == 1
        assert len(events[DagsterEventType.STEP_UP_FOR_RETRY]) == 1
        assert len(events[DagsterEventType.STEP_RESTARTED]) == 1
        assert len(events[DagsterEventType.STEP_SUCCESS]) == 1


def define_retry_limit_pipeline():
    # Two always-failing solids: one with the default retry limit, one with 3.
    @lambda_solid
    def default_max():
        raise RetryRequested()

    @lambda_solid
    def three_max():
        raise RetryRequested(max_retries=3)

    @pipeline
    def retry_limits():
        default_max()
        three_max()

    return retry_limits


@executors
def test_step_retry_limit(environment):
    # Retry counts should honor the per-solid max_retries settings.
    with instance_for_test() as instance:
        result = execute_pipeline(
            reconstructable(define_retry_limit_pipeline),
            run_config=environment,
            raise_on_error=False,
            instance=instance,
        )
        assert not result.success

        events = defaultdict(list)
        for ev in result.events_by_step_key["default_max"]:
            events[ev.event_type].append(ev)

        assert len(events[DagsterEventType.STEP_START]) == 1
        assert len(events[DagsterEventType.STEP_UP_FOR_RETRY]) == 1
        assert len(events[DagsterEventType.STEP_RESTARTED]) == 1
        assert len(events[DagsterEventType.STEP_FAILURE]) == 1

        events = defaultdict(list)
        for ev in result.events_by_step_key["three_max"]:
            events[ev.event_type].append(ev)

        assert len(events[DagsterEventType.STEP_START]) == 1
        assert len(events[DagsterEventType.STEP_UP_FOR_RETRY]) == 3
        assert len(events[DagsterEventType.STEP_RESTARTED]) == 3
        assert len(events[DagsterEventType.STEP_FAILURE]) == 1


def test_retry_deferral():
    # In DEFERRED mode steps go up-for-retry but are never restarted.
    with instance_for_test() as instance:
        events = execute_plan(
            create_execution_plan(define_retry_limit_pipeline()),
            pipeline_run=PipelineRun(pipeline_name="retry_limits", run_id="42"),
            retries=Retries(RetryMode.DEFERRED),
            instance=instance,
        )
        events_by_type = defaultdict(list)
        for ev in events:
            events_by_type[ev.event_type].append(ev)

        assert len(events_by_type[DagsterEventType.STEP_START]) == 2
        assert len(events_by_type[DagsterEventType.STEP_UP_FOR_RETRY]) == 2
        assert DagsterEventType.STEP_RESTARTED not in events
        assert DagsterEventType.STEP_SUCCESS not in events


# Seconds each retry should wait in the fixed-wait test below.
DELAY = 2


def define_retry_wait_fixed_pipeline():
    # Same fail-once pattern as above, but requesting a fixed wait before retry.
    @solid(config_schema=str)
    def fail_first_and_wait(context):
        file = os.path.join(context.solid_config, "i_threw_up")
        if os.path.exists(file):
            return "okay perfect"
        else:
            open(file, "a").close()
            raise RetryRequested(seconds_to_wait=DELAY)

    @pipeline
    def step_retry():
        fail_first_and_wait()

    return step_retry


@executors
def test_step_retry_fixed_wait(environment):
    # The gap between up-for-retry and restarted must be at least DELAY seconds.
    with instance_for_test() as instance:
        with tempfile.TemporaryDirectory() as tempdir:
            env = dict(environment)
            env["solids"] = {"fail_first_and_wait": {"config": tempdir}}
            event_iter = execute_pipeline_iterator(
                reconstructable(define_retry_wait_fixed_pipeline), run_config=env, instance=instance,
            )
            start_wait = None
            end_wait = None
            success = None
            for event in event_iter:
                if event.is_step_up_for_retry:
                    start_wait = time.time()
                if event.is_step_restarted:
                    end_wait = time.time()
                if event.is_pipeline_success:
                    success = True

            assert success
            assert start_wait is not None
            assert end_wait is not None
            delay = end_wait - start_wait
            assert delay > DELAY
0.417153
0.374905
"""End to end tests for TrainRunner.""" import datetime import os import shutil from absl import flags from dopamine.discrete_domains import train import tensorflow.compat.v1 as tf FLAGS = flags.FLAGS class TrainRunnerIntegrationTest(tf.test.TestCase): """Tests for Atari environment with various agents. """ def setUp(self): FLAGS.base_dir = os.path.join( '/tmp/dopamine_tests', datetime.datetime.utcnow().strftime('run_%Y_%m_%d_%H_%M_%S')) self._checkpoint_dir = os.path.join(FLAGS.base_dir, 'checkpoints') self._logging_dir = os.path.join(FLAGS.base_dir, 'logs') def quickDqnFlags(self): """Assign flags for a quick run of DQN agent.""" FLAGS.gin_files = ['dopamine/agents/dqn/configs/dqn.gin'] FLAGS.gin_bindings = [ "create_runner.schedule='continuous_train'", 'Runner.training_steps=100', 'Runner.evaluation_steps=10', 'Runner.num_iterations=1', 'Runner.max_steps_per_episode=100', 'dqn_agent.DQNAgent.min_replay_history=500', 'WrappedReplayBuffer.replay_capacity=100' ] FLAGS.alsologtostderr = True def verifyFilesCreated(self, base_dir): """Verify that files have been created.""" # Check checkpoint files self.assertTrue( os.path.exists(os.path.join(self._checkpoint_dir, 'ckpt.0'))) self.assertTrue( os.path.exists(os.path.join(self._checkpoint_dir, 'checkpoint'))) self.assertTrue( os.path.exists( os.path.join(self._checkpoint_dir, 'sentinel_checkpoint_complete.0'))) # Check log files self.assertTrue(os.path.exists(os.path.join(self._logging_dir, 'log_0'))) def testIntegrationDqn(self): """Test the DQN agent.""" tf.logging.info('####### Training the DQN agent #####') tf.logging.info('####### DQN base_dir: {}'.format(FLAGS.base_dir)) self.quickDqnFlags() train.main([]) self.verifyFilesCreated(FLAGS.base_dir) shutil.rmtree(FLAGS.base_dir) if __name__ == '__main__': tf.test.main()
tests/dopamine/tests/train_runner_integration_test.py
"""End to end tests for TrainRunner.""" import datetime import os import shutil from absl import flags from dopamine.discrete_domains import train import tensorflow.compat.v1 as tf FLAGS = flags.FLAGS class TrainRunnerIntegrationTest(tf.test.TestCase): """Tests for Atari environment with various agents. """ def setUp(self): FLAGS.base_dir = os.path.join( '/tmp/dopamine_tests', datetime.datetime.utcnow().strftime('run_%Y_%m_%d_%H_%M_%S')) self._checkpoint_dir = os.path.join(FLAGS.base_dir, 'checkpoints') self._logging_dir = os.path.join(FLAGS.base_dir, 'logs') def quickDqnFlags(self): """Assign flags for a quick run of DQN agent.""" FLAGS.gin_files = ['dopamine/agents/dqn/configs/dqn.gin'] FLAGS.gin_bindings = [ "create_runner.schedule='continuous_train'", 'Runner.training_steps=100', 'Runner.evaluation_steps=10', 'Runner.num_iterations=1', 'Runner.max_steps_per_episode=100', 'dqn_agent.DQNAgent.min_replay_history=500', 'WrappedReplayBuffer.replay_capacity=100' ] FLAGS.alsologtostderr = True def verifyFilesCreated(self, base_dir): """Verify that files have been created.""" # Check checkpoint files self.assertTrue( os.path.exists(os.path.join(self._checkpoint_dir, 'ckpt.0'))) self.assertTrue( os.path.exists(os.path.join(self._checkpoint_dir, 'checkpoint'))) self.assertTrue( os.path.exists( os.path.join(self._checkpoint_dir, 'sentinel_checkpoint_complete.0'))) # Check log files self.assertTrue(os.path.exists(os.path.join(self._logging_dir, 'log_0'))) def testIntegrationDqn(self): """Test the DQN agent.""" tf.logging.info('####### Training the DQN agent #####') tf.logging.info('####### DQN base_dir: {}'.format(FLAGS.base_dir)) self.quickDqnFlags() train.main([]) self.verifyFilesCreated(FLAGS.base_dir) shutil.rmtree(FLAGS.base_dir) if __name__ == '__main__': tf.test.main()
0.554229
0.316713
import asyncio
import random
import time
from types import SimpleNamespace
from typing import Union, List, Tuple

import aiohttp
from aiohttp import ClientSession, TraceRequestStartParams, TraceRequestEndParams

Number = Union[int, float]


class FlowController(aiohttp.TraceConfig):
    """aiohttp TraceConfig that rate-limits requests sharing the same key.

    Requests are grouped by a key (a value from ``trace_request_ctx`` when
    present, otherwise the target host); a new request in a group waits until
    at least ``interval`` seconds have passed since the previous request in
    that group finished.
    """

    def __init__(self,
                 interval: Union[Number, List[Number], Tuple[Number, Number]] = 1,
                 ctx_key='host',
                 ):
        super().__init__()
        # Either a fixed delay or a (low, high) range sampled per request.
        self.interval = interval
        self.ctx_key = ctx_key
        # Per-key bookkeeping: {'last_start_time': ..., 'last_end_time': ...}.
        self.store = {}
        self.on_request_start.append(self.__on_request_start)
        self.on_request_end.append(self.__on_request_end)

    def _get_key(self,
                 trace_config_ctx: SimpleNamespace,
                 params: Union[TraceRequestStartParams, TraceRequestEndParams]):
        """Return the throttling key: explicit ctx value, else the URL host."""
        ctx = trace_config_ctx.trace_request_ctx
        explicit = ctx and ctx.get(self.ctx_key)
        return explicit or params.url.host

    async def __on_request_start(self,
                                 session: ClientSession,
                                 trace_config_ctx: SimpleNamespace,
                                 params: TraceRequestStartParams):
        key = self._get_key(trace_config_ctx, params)
        if not key:
            return
        if not self.store.get(key):
            # First request for this key: record it and let it through.
            self.store[key] = {
                'last_start_time': time.time(),
                'last_end_time': None,
            }
            return
        if isinstance(self.interval, (int, float)):
            gap = self.interval
        else:
            gap = random.uniform(*self.interval)
        wait_began = time.time()
        while True:
            entry = self.store[key]
            finished = entry.get('last_end_time')
            # Ready once the previous request both finished and "cooled down"
            # for at least `gap` seconds.
            if finished and entry.get('last_start_time') < finished and finished + gap < time.time():
                entry['last_start_time'] = time.time()
                break
            # Safety valve: never spin longer than 10x the interval, e.g. when
            # an errored request left the store in an inconsistent state.
            if time.time() - wait_began > 10 * gap:
                print(f'warning: "{key}" store may not be set properly (url: {params.url})')
                entry['last_start_time'] = time.time()
                break
            await asyncio.sleep(min(1, gap / 5))

    async def __on_request_end(self,
                               session: ClientSession,
                               trace_config_ctx: SimpleNamespace,
                               params: TraceRequestEndParams):
        key = self._get_key(trace_config_ctx, params)
        if key:
            assert self.store[key] is not None
            self.store[key]['last_end_time'] = time.time()
scripts/flow_controller.py
import asyncio
import random
import time
from types import SimpleNamespace
from typing import Union, List, Tuple

import aiohttp
from aiohttp import ClientSession, TraceRequestStartParams, TraceRequestEndParams

Number = Union[int, float]


class FlowController(aiohttp.TraceConfig):
    """aiohttp TraceConfig that rate-limits requests sharing the same key.

    Requests are grouped by a key (a value from ``trace_request_ctx`` when
    present, otherwise the target host); a new request in a group waits until
    at least ``interval`` seconds have passed since the previous request in
    that group finished.
    """

    def __init__(self,
                 interval: Union[Number, List[Number], Tuple[Number, Number]] = 1,
                 ctx_key='host',
                 ):
        super().__init__()
        # Either a fixed delay or a (low, high) range sampled per request.
        self.interval = interval
        self.ctx_key = ctx_key
        # Per-key bookkeeping: {'last_start_time': ..., 'last_end_time': ...}.
        self.store = {}
        self.on_request_start.append(self.__on_request_start)
        self.on_request_end.append(self.__on_request_end)

    def _get_key(self,
                 trace_config_ctx: SimpleNamespace,
                 params: Union[TraceRequestStartParams, TraceRequestEndParams]):
        """Return the throttling key: explicit ctx value, else the URL host."""
        ctx = trace_config_ctx.trace_request_ctx
        explicit = ctx and ctx.get(self.ctx_key)
        return explicit or params.url.host

    async def __on_request_start(self,
                                 session: ClientSession,
                                 trace_config_ctx: SimpleNamespace,
                                 params: TraceRequestStartParams):
        key = self._get_key(trace_config_ctx, params)
        if not key:
            return
        if not self.store.get(key):
            # First request for this key: record it and let it through.
            self.store[key] = {
                'last_start_time': time.time(),
                'last_end_time': None,
            }
            return
        if isinstance(self.interval, (int, float)):
            gap = self.interval
        else:
            gap = random.uniform(*self.interval)
        wait_began = time.time()
        while True:
            entry = self.store[key]
            finished = entry.get('last_end_time')
            # Ready once the previous request both finished and "cooled down"
            # for at least `gap` seconds.
            if finished and entry.get('last_start_time') < finished and finished + gap < time.time():
                entry['last_start_time'] = time.time()
                break
            # Safety valve: never spin longer than 10x the interval, e.g. when
            # an errored request left the store in an inconsistent state.
            if time.time() - wait_began > 10 * gap:
                print(f'warning: "{key}" store may not be set properly (url: {params.url})')
                entry['last_start_time'] = time.time()
                break
            await asyncio.sleep(min(1, gap / 5))

    async def __on_request_end(self,
                               session: ClientSession,
                               trace_config_ctx: SimpleNamespace,
                               params: TraceRequestEndParams):
        key = self._get_key(trace_config_ctx, params)
        if key:
            assert self.store[key] is not None
            self.store[key]['last_end_time'] = time.time()
0.668556
0.073065
from typing import Dict, List, Tuple, Set, NamedTuple, Optional
import csv, re, os, operator, sys
import xml.etree.ElementTree as ET

# csv rejects very large fields by default; raise the limit as high as the
# platform allows (sys.maxsize overflows on some builds, so halve until OK).
maxsize = sys.maxsize
while True:
    try:
        csv.field_size_limit(maxsize)
        break
    except OverflowError:
        maxsize //= 2


# TYPES


class Row(NamedTuple):
    # One trace event: which graph it belongs to, its position, its payload.
    guid: str
    order_marker: int
    textdata: str


# PARSING


def load_file(filename: str) -> List[Row]:
    """
    Returns a list of events, not sorted or filtered.
    """
    _, ext = os.path.splitext(filename)

    if ext == ".csv":
        with open(filename) as file:
            parsed = [
                make_row_from_jarvis(record["MessageText"])
                for record in csv.DictReader(file)
            ]
        return [row for row in parsed if row]
    elif ext == ".xml":
        ns = {"": "http://tempuri.org/TracePersistence.xsd"}
        tree = ET.parse(filename)
        parsed = [
            make_row_from_xml(event, ns) for event in tree.findall(".//Event", ns)
        ]
        return [row for row in parsed if row]
    else:
        return []


def make_row_from_xml(event: ET.Element, ns: Dict[str, str]) -> Optional[Row]:
    """Build a Row from a trace Event element; None if it is not a graph event."""
    if event.attrib["id"] != "134":
        return None

    guid = subclass = textdata = None
    order_marker = None
    for col in event.findall("Column", ns):
        col_id = col.attrib["id"]
        if col_id == "46":
            guid = col.text
        if col_id == "1":
            subclass = col.text
        if col_id == "10" and col.text:
            order_marker = int(col.text)
        if col_id == "42":
            textdata = col.text

    if textdata and order_marker is not None and guid and subclass:
        # Subclass 2 is the annotated graph; anything else is the plan.
        suffix = "annotated" if subclass == "2" else "plan"
        return Row(f"{guid}-{suffix}", order_marker, textdata)
    return None


def make_row_from_jarvis(message_txt: str) -> Optional[Row]:
    """Build a Row from a jarvis MessageText field; None when it doesn't parse."""
    if "graphcorrelationid" in message_txt.lower():
        print(
            "This event is from an older version of the job graph feature (shouldn't have 'GraphCorrelationID' in it)"
        )
    match = re.match(r"TextData: (.*); IntegerData: (.\d*)", message_txt)
    if match:
        # NOTE(review): the pattern above defines only two groups, so
        # group(1, 2, 3) raises "no such group" whenever it actually matches.
        # Confirm the intended message format before relying on the CSV path.
        textdata, guid, order_marker_str = match.group(1, 2, 3)
        order_marker = int(order_marker_str)
        return Row(guid, order_marker, textdata)
    return None


def extract_metadata(header_row: Row) -> Optional[Tuple[int, int]]:
    """Return (length, additional_events) from a header row, or None."""
    # should really extract things correctly here
    m = re.match(
        r".*Length=\"(\d*)\".*AdditionalEvents=\"(\d*)\".*", header_row.textdata
    )
    return (int(m.group(1)), int(m.group(2))) if m else None


def remove_pii_tags(protected_data: str) -> str:
    """Strip a surrounding <pii>...</pii> wrapper, if present."""
    if protected_data.startswith("<pii>") and protected_data.endswith("</pii>"):
        return protected_data[5:-6]
    return protected_data


def get_all_guids(data: List[Row]) -> Set[str]:
    """Return the distinct graph identifiers present in the data."""
    return {row.guid for row in data}


# GRAPH


def get_graph(data: List[Row], guid: str) -> Tuple[str, str]:
    """Reassemble the DGML text for one guid; returns (graph_text, guid)."""
    matching = sorted(
        (row for row in data if row.guid == guid),
        key=operator.attrgetter("order_marker"),
    )
    header, *graph_data = matching

    metadata = extract_metadata(header)
    if metadata:
        size, additional_events = metadata
        assert additional_events == len(
            graph_data
        ), f"metadata says there are {additional_events} rows; but there are {len(graph_data)}"

    return "".join(remove_pii_tags(row.textdata) for row in graph_data), guid


# INPUT/OUTPUT FILES


def get_all_event_files() -> List[str]:
    """List every file under the local 'data' directory."""
    return [os.path.join("data", f) for f in os.listdir("data")]


def get_output_file(input_file: str, guid: str, output_folder: str) -> str:
    """Build the output path for a graph; ensures the output folder exists."""
    name, _ext = os.path.splitext(os.path.split(input_file)[1])
    os.makedirs(output_folder, exist_ok=True)
    return os.path.join(output_folder, f"{name}-{guid}.DGML")


def writefile(filename: str, data: str) -> None:
    with open(filename, "w") as file:
        file.write(data)


def reassemble_file(filename: str) -> List[Tuple[str, str]]:
    """Rebuild every graph in one file; returns a list of (graph_text, guid)."""
    result: List[Tuple[str, str]] = []
    try:
        data = load_file(filename)
        for guid in get_all_guids(data):
            result.append(get_graph(data, guid))
    except (IndexError, ValueError) as e:
        print(f"error processing (unknown): {e}")
    return result


def all_files() -> None:
    """Rebuild every graph in ./data and write the results to ./output."""
    if not os.path.isdir("data"):
        print("directory 'data' does not exist.")
        return

    for input_file in get_all_event_files():
        try:
            data = load_file(input_file)
            guids = get_all_guids(data)
            os.makedirs("output", exist_ok=True)
            for guid in guids:
                graph, _ = get_graph(data, guid)
                output_file = get_output_file(input_file, guid, "output")
                print(f'Saving "{output_file}"')
                writefile(output_file, graph)
        except (IndexError, ValueError) as e:
            print(f"error processing {input_file}: {e}")


# SCRIPT


def print_help() -> None:
    print(
        """
Guide for rebuild.py (requires Python 3.8 or later)

Use:
\tpython rebuild.py \tRebuilds all graphs in "./data" and writes them to "./output".
\tpython rebuild.py <inputfile> <outputfolder>\tRebuilds <inputfile> and writes them to <outputfolder>
"""
    )


def main() -> None:
    if len(sys.argv) == 1:
        print("Reassembling all graphs in ./data")
        all_files()
    if len(sys.argv) == 2:
        print_help()
    if len(sys.argv) == 3:
        _, input_file, output_folder = sys.argv
        for graph, guid in reassemble_file(input_file):
            output_file = get_output_file(input_file, guid, output_folder)
            print(f'Saving "{output_file}"')
            writefile(get_output_file(input_file, guid, output_folder), graph)


if __name__ == "__main__":
    main()
ASJobGraphEvents/rebuild.py
from typing import Dict, List, Tuple, Set, NamedTuple, Optional
import csv, re, os, operator, sys
import xml.etree.ElementTree as ET

# csv rejects very large fields by default; raise the limit as high as the
# platform allows (sys.maxsize overflows on some builds, so halve until OK).
maxsize = sys.maxsize
while True:
    try:
        csv.field_size_limit(maxsize)
        break
    except OverflowError:
        maxsize //= 2


# TYPES


class Row(NamedTuple):
    # One trace event: which graph it belongs to, its position, its payload.
    guid: str
    order_marker: int
    textdata: str


# PARSING


def load_file(filename: str) -> List[Row]:
    """
    Returns a list of events, not sorted or filtered.
    """
    _, ext = os.path.splitext(filename)

    if ext == ".csv":
        with open(filename) as file:
            parsed = [
                make_row_from_jarvis(record["MessageText"])
                for record in csv.DictReader(file)
            ]
        return [row for row in parsed if row]
    elif ext == ".xml":
        ns = {"": "http://tempuri.org/TracePersistence.xsd"}
        tree = ET.parse(filename)
        parsed = [
            make_row_from_xml(event, ns) for event in tree.findall(".//Event", ns)
        ]
        return [row for row in parsed if row]
    else:
        return []


def make_row_from_xml(event: ET.Element, ns: Dict[str, str]) -> Optional[Row]:
    """Build a Row from a trace Event element; None if it is not a graph event."""
    if event.attrib["id"] != "134":
        return None

    guid = subclass = textdata = None
    order_marker = None
    for col in event.findall("Column", ns):
        col_id = col.attrib["id"]
        if col_id == "46":
            guid = col.text
        if col_id == "1":
            subclass = col.text
        if col_id == "10" and col.text:
            order_marker = int(col.text)
        if col_id == "42":
            textdata = col.text

    if textdata and order_marker is not None and guid and subclass:
        # Subclass 2 is the annotated graph; anything else is the plan.
        suffix = "annotated" if subclass == "2" else "plan"
        return Row(f"{guid}-{suffix}", order_marker, textdata)
    return None


def make_row_from_jarvis(message_txt: str) -> Optional[Row]:
    """Build a Row from a jarvis MessageText field; None when it doesn't parse."""
    if "graphcorrelationid" in message_txt.lower():
        print(
            "This event is from an older version of the job graph feature (shouldn't have 'GraphCorrelationID' in it)"
        )
    match = re.match(r"TextData: (.*); IntegerData: (.\d*)", message_txt)
    if match:
        # NOTE(review): the pattern above defines only two groups, so
        # group(1, 2, 3) raises "no such group" whenever it actually matches.
        # Confirm the intended message format before relying on the CSV path.
        textdata, guid, order_marker_str = match.group(1, 2, 3)
        order_marker = int(order_marker_str)
        return Row(guid, order_marker, textdata)
    return None


def extract_metadata(header_row: Row) -> Optional[Tuple[int, int]]:
    """Return (length, additional_events) from a header row, or None."""
    # should really extract things correctly here
    m = re.match(
        r".*Length=\"(\d*)\".*AdditionalEvents=\"(\d*)\".*", header_row.textdata
    )
    return (int(m.group(1)), int(m.group(2))) if m else None


def remove_pii_tags(protected_data: str) -> str:
    """Strip a surrounding <pii>...</pii> wrapper, if present."""
    if protected_data.startswith("<pii>") and protected_data.endswith("</pii>"):
        return protected_data[5:-6]
    return protected_data


def get_all_guids(data: List[Row]) -> Set[str]:
    """Return the distinct graph identifiers present in the data."""
    return {row.guid for row in data}


# GRAPH


def get_graph(data: List[Row], guid: str) -> Tuple[str, str]:
    """Reassemble the DGML text for one guid; returns (graph_text, guid)."""
    matching = sorted(
        (row for row in data if row.guid == guid),
        key=operator.attrgetter("order_marker"),
    )
    header, *graph_data = matching

    metadata = extract_metadata(header)
    if metadata:
        size, additional_events = metadata
        assert additional_events == len(
            graph_data
        ), f"metadata says there are {additional_events} rows; but there are {len(graph_data)}"

    return "".join(remove_pii_tags(row.textdata) for row in graph_data), guid


# INPUT/OUTPUT FILES


def get_all_event_files() -> List[str]:
    """List every file under the local 'data' directory."""
    return [os.path.join("data", f) for f in os.listdir("data")]


def get_output_file(input_file: str, guid: str, output_folder: str) -> str:
    """Build the output path for a graph; ensures the output folder exists."""
    name, _ext = os.path.splitext(os.path.split(input_file)[1])
    os.makedirs(output_folder, exist_ok=True)
    return os.path.join(output_folder, f"{name}-{guid}.DGML")


def writefile(filename: str, data: str) -> None:
    with open(filename, "w") as file:
        file.write(data)


def reassemble_file(filename: str) -> List[Tuple[str, str]]:
    """Rebuild every graph in one file; returns a list of (graph_text, guid)."""
    result: List[Tuple[str, str]] = []
    try:
        data = load_file(filename)
        for guid in get_all_guids(data):
            result.append(get_graph(data, guid))
    except (IndexError, ValueError) as e:
        print(f"error processing (unknown): {e}")
    return result


def all_files() -> None:
    """Rebuild every graph in ./data and write the results to ./output."""
    if not os.path.isdir("data"):
        print("directory 'data' does not exist.")
        return

    for input_file in get_all_event_files():
        try:
            data = load_file(input_file)
            guids = get_all_guids(data)
            os.makedirs("output", exist_ok=True)
            for guid in guids:
                graph, _ = get_graph(data, guid)
                output_file = get_output_file(input_file, guid, "output")
                print(f'Saving "{output_file}"')
                writefile(output_file, graph)
        except (IndexError, ValueError) as e:
            print(f"error processing {input_file}: {e}")


# SCRIPT


def print_help() -> None:
    print(
        """
Guide for rebuild.py (requires Python 3.8 or later)

Use:
\tpython rebuild.py \tRebuilds all graphs in "./data" and writes them to "./output".
\tpython rebuild.py <inputfile> <outputfolder>\tRebuilds <inputfile> and writes them to <outputfolder>
"""
    )


def main() -> None:
    if len(sys.argv) == 1:
        print("Reassembling all graphs in ./data")
        all_files()
    if len(sys.argv) == 2:
        print_help()
    if len(sys.argv) == 3:
        _, input_file, output_folder = sys.argv
        for graph, guid in reassemble_file(input_file):
            output_file = get_output_file(input_file, guid, output_folder)
            print(f'Saving "{output_file}"')
            writefile(get_output_file(input_file, guid, output_folder), graph)


if __name__ == "__main__":
    main()
0.588534
0.309376
# Re-pricing experiment for route CF66 (营口 -> 海口): recompute per-booking
# prices from load factor, sales-peak day windows and voyage competition.
# NOTE(review): this body was reconstructed from a whitespace-collapsed
# source; the nesting of the final re-pricing loops was ambiguous there —
# confirm against the original script before trusting its numbers.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from datetime import timedelta
import datetime
import csv
import math
from pyecharts.charts import Bar
from pyecharts import options as opts
from pyecharts.render import make_snapshot
from snapshot_selenium import snapshot

# --- load bookings, the "ordinary customer" subset and the rule book ---
data = pd.read_excel('CF66-all.xlsx')
data.sort_values(by=['WBL_AUD_DT'], ascending=True, inplace=True)
or_data = pd.read_excel('CF66-ordinary.xlsx')
rule = pd.read_excel('6. Existing pricing strategy.xlsx')

or_name = or_data['WBL_NUM'].unique()
svvd_name = data['SVVD'].unique()

# Column 9 = 'ordinary': flag bookings whose waybill number (column 2)
# belongs to an ordinary customer.
data['ordinary'] = 0
for i in range(len(data)):
    if data.iloc[i, 2] in or_name:
        data.iloc[i, 9] = 1

# Column 10 = 'volume': container size in feet parsed from CNTR_TYPE
# ("20GP" -> 20, "40HQ" -> 40, ...).
data['volume'] = data['CNTR_TYPE']
for i in range(len(data)):
    data.iloc[i, 10] = int(data.iloc[i, 10][0:2])

# One DataFrame per voyage (SVVD).
raw_data = data.groupby('SVVD')
raw_list = [group for _, group in list(raw_data)]
total_volume = raw_data['volume'].sum() * 1.2

# Pricing rules for this port pair, grouped by their day windows.
thisrule = rule.groupby(['装港', '卸港']).get_group(('营口', '海口'))
group_rule = thisrule.groupby(['开始天数', '结束天数'])
day_list = []
rule_list = []
for window, window_rules in list(group_rule):
    day_list.append(window)
    rule_list.append(window_rules)

# Keep only the last 14 days of bookings for every voyage.
m = datetime.timedelta(days=14)
newlist = []
for voyage in raw_list:
    voyage['WBL_AUD_DT'] = pd.to_datetime(voyage['WBL_AUD_DT'])
    m = datetime.timedelta(days=14)
    newlist.append(voyage[voyage['WBL_AUD_DT'] >= voyage['WBL_AUD_DT'].max() - m])
del raw_list

for voyage in newlist:
    voyage['acc_volume'] = voyage['volume'].cumsum()
    voyage['total_volume'] = voyage['volume'].sum() * 1.2
    m = datetime.timedelta(days=14)
    # Column 13 = 'day': booking day within the 14-day window; day 14 is
    # clamped to 13.99 so it falls inside the last half-open interval.
    voyage['day'] = (voyage['WBL_AUD_DT'] - voyage['WBL_AUD_DT'].max() + m).dt.days
    for j in range(len(voyage)):
        if voyage.iloc[j, 13] == 14:
            voyage.iloc[j, 13] = 13.99
    voyage['acc_rate'] = voyage['acc_volume'] / voyage['total_volume'] * 100
    voyage['new_AMT'] = voyage['AMT']
    voyage['acc_20gp'] = 0
    voyage['acc_40gp'] = 0
    voyage['acc_40hq'] = 0
    voyage['acc_price'] = 0

# ---- new strategy ----
# The three busiest booking days become the price-break days.
total_list = pd.concat(newlist)
gp_total_list = total_list.groupby('day')
daysale = gp_total_list['WBL_NUM'].count()
daysale = daysale.sort_values(ascending=False)
daysale = pd.DataFrame(daysale)
daysale.to_csv('daysale.csv')
del daysale
# Round-trip through csv so the 'day' index becomes a plain first column.
daysale = pd.read_csv('daysale.csv')

break_day = []
for i in range(3):
    break_day.append(int(daysale.iloc[i, 0]))
break_day.sort()
new_break_day = [d + 1 for d in break_day]
print(new_break_day)

# Build (start, end) day windows from the break days, capped at day 14.
new_day_list = []
for i in range(3):
    if i == 2:
        if new_break_day[i] > 14:
            break
        else:
            new_day_list.append((new_break_day[i], 14))
    else:
        new_day_list.append((new_break_day[i], new_break_day[i + 1]))

# Competition: count voyages in the full schedule departing within +/- 2
# days of each voyage's last booking date (each voyage counts itself).
total_data = pd.read_excel('test.xlsx')
group_ = total_data.groupby('SVVD')
SVVD_inf = group_['WBL_AUD_DT'].max().sort_values()
SVVD_inf = pd.DataFrame(SVVD_inf)
SVVD_inf.to_csv('SVVD_inf.csv')
del SVVD_inf
SVVD_inf = pd.read_csv('SVVD_inf.csv')
SVVD_inf['WBL_AUD_DT'] = pd.to_datetime(SVVD_inf['WBL_AUD_DT'])
req_day = datetime.timedelta(days=2)
SVVD_inf['compete'] = 0
compete_factor = []
for i in range(len(SVVD_inf)):
    former_two_day = SVVD_inf.iloc[i]['WBL_AUD_DT'] - req_day
    latter_two_day = SVVD_inf.iloc[i]['WBL_AUD_DT'] + req_day
    for j in range(len(SVVD_inf)):
        if ((SVVD_inf.iloc[j]['WBL_AUD_DT'] >= former_two_day)
                and (SVVD_inf.iloc[j]['WBL_AUD_DT'] <= latter_two_day)):
            SVVD_inf.iloc[i, 2] = SVVD_inf.iloc[i, 2] + 1
    if SVVD_inf.iloc[i]['SVVD'] in svvd_name:
        compete_factor.append((SVVD_inf.iloc[i]['SVVD'], SVVD_inf.iloc[i]['compete']))
print(SVVD_inf)

# Fewer than 5 competitors -> positive premium, more -> discount.
compete_delta_price = []
for svvd, competitors in compete_factor:
    compete_delta_price.append((svvd, (math.log(5, 10) - math.log(competitors, 10)) * 50))
print(compete_delta_price)


def delta_price(i):
    """Premium that grows with the cumulative load factor `i` (in percent)."""
    if i < 40:
        return 0
    if i >= 40 and i <= 70:
        return math.pow(9, (i - 40) / 15) - 1
    if i > 70:
        return 80


# Apply the new pricing voyage by voyage; column 19 is 'acc_price'.
for k in range(len(newlist)):
    acc_price = 0
    compete_price = 0
    for svvd, price in compete_delta_price:
        if newlist[k].iloc[0]['SVVD'] == svvd:
            compete_price = price
            break
    for i in range(len(new_day_list)):
        print('i=' + str(i))
        first_day = new_day_list[i][0]
        last_day = new_day_list[i][1]
        flag = 0
        for j in range(len(newlist[k])):
            if ((newlist[k].iloc[j]['day'] >= first_day)
                    and (newlist[k].iloc[j]['day'] < last_day)
                    and (newlist[k].iloc[j]['ordinary'] == 1)):
                # Drop the price once per window while the ship is under 30% full.
                if newlist[k].iloc[j]['acc_rate'] < 30:
                    if flag == 0:
                        flag = 1
                        acc_price -= 10 * math.exp(i)
                newlist[k].iloc[j, 19] += acc_price
    for j_ in range(len(newlist[k])):
        if newlist[k].iloc[j_]['ordinary'] == 1:
            newlist[k].iloc[j_, 19] += delta_price(newlist[k].iloc[j_]['acc_rate'])
            if newlist[k].iloc[j_]['day'] >= 2:
                newlist[k].iloc[j_, 19] += compete_price
    newlist[k]['new_AMT1'] = newlist[k]['AMT'] + newlist[k]['acc_price']

for i in range(len(newlist)):
    newlist[i].to_csv('newvoyage' + str(i + 1) + '-3.csv')

raw_r = 0
new_r = 0
for voyage in newlist:
    print('revenue:' + str(voyage['AMT'].sum()))
    raw_r = raw_r + voyage['AMT'].sum()
    print('newrevenue:' + str(voyage['new_AMT1'].sum()))
    new_r = new_r + voyage['new_AMT1'].sum()
print('total revenue:' + str(raw_r))
print('total newrevenue:' + str(new_r))
2021-02-03/code.py
# Re-pricing experiment for route CF66 (营口 -> 海口): recompute per-booking
# prices from load factor, sales-peak day windows and voyage competition.
# NOTE(review): this body was reconstructed from a whitespace-collapsed
# source; the nesting of the final re-pricing loops was ambiguous there —
# confirm against the original script before trusting its numbers.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from datetime import timedelta
import datetime
import csv
import math
from pyecharts.charts import Bar
from pyecharts import options as opts
from pyecharts.render import make_snapshot
from snapshot_selenium import snapshot

# --- load bookings, the "ordinary customer" subset and the rule book ---
data = pd.read_excel('CF66-all.xlsx')
data.sort_values(by=['WBL_AUD_DT'], ascending=True, inplace=True)
or_data = pd.read_excel('CF66-ordinary.xlsx')
rule = pd.read_excel('6. Existing pricing strategy.xlsx')

or_name = or_data['WBL_NUM'].unique()
svvd_name = data['SVVD'].unique()

# Column 9 = 'ordinary': flag bookings whose waybill number (column 2)
# belongs to an ordinary customer.
data['ordinary'] = 0
for i in range(len(data)):
    if data.iloc[i, 2] in or_name:
        data.iloc[i, 9] = 1

# Column 10 = 'volume': container size in feet parsed from CNTR_TYPE
# ("20GP" -> 20, "40HQ" -> 40, ...).
data['volume'] = data['CNTR_TYPE']
for i in range(len(data)):
    data.iloc[i, 10] = int(data.iloc[i, 10][0:2])

# One DataFrame per voyage (SVVD).
raw_data = data.groupby('SVVD')
raw_list = [group for _, group in list(raw_data)]
total_volume = raw_data['volume'].sum() * 1.2

# Pricing rules for this port pair, grouped by their day windows.
thisrule = rule.groupby(['装港', '卸港']).get_group(('营口', '海口'))
group_rule = thisrule.groupby(['开始天数', '结束天数'])
day_list = []
rule_list = []
for window, window_rules in list(group_rule):
    day_list.append(window)
    rule_list.append(window_rules)

# Keep only the last 14 days of bookings for every voyage.
m = datetime.timedelta(days=14)
newlist = []
for voyage in raw_list:
    voyage['WBL_AUD_DT'] = pd.to_datetime(voyage['WBL_AUD_DT'])
    m = datetime.timedelta(days=14)
    newlist.append(voyage[voyage['WBL_AUD_DT'] >= voyage['WBL_AUD_DT'].max() - m])
del raw_list

for voyage in newlist:
    voyage['acc_volume'] = voyage['volume'].cumsum()
    voyage['total_volume'] = voyage['volume'].sum() * 1.2
    m = datetime.timedelta(days=14)
    # Column 13 = 'day': booking day within the 14-day window; day 14 is
    # clamped to 13.99 so it falls inside the last half-open interval.
    voyage['day'] = (voyage['WBL_AUD_DT'] - voyage['WBL_AUD_DT'].max() + m).dt.days
    for j in range(len(voyage)):
        if voyage.iloc[j, 13] == 14:
            voyage.iloc[j, 13] = 13.99
    voyage['acc_rate'] = voyage['acc_volume'] / voyage['total_volume'] * 100
    voyage['new_AMT'] = voyage['AMT']
    voyage['acc_20gp'] = 0
    voyage['acc_40gp'] = 0
    voyage['acc_40hq'] = 0
    voyage['acc_price'] = 0

# ---- new strategy ----
# The three busiest booking days become the price-break days.
total_list = pd.concat(newlist)
gp_total_list = total_list.groupby('day')
daysale = gp_total_list['WBL_NUM'].count()
daysale = daysale.sort_values(ascending=False)
daysale = pd.DataFrame(daysale)
daysale.to_csv('daysale.csv')
del daysale
# Round-trip through csv so the 'day' index becomes a plain first column.
daysale = pd.read_csv('daysale.csv')

break_day = []
for i in range(3):
    break_day.append(int(daysale.iloc[i, 0]))
break_day.sort()
new_break_day = [d + 1 for d in break_day]
print(new_break_day)

# Build (start, end) day windows from the break days, capped at day 14.
new_day_list = []
for i in range(3):
    if i == 2:
        if new_break_day[i] > 14:
            break
        else:
            new_day_list.append((new_break_day[i], 14))
    else:
        new_day_list.append((new_break_day[i], new_break_day[i + 1]))

# Competition: count voyages in the full schedule departing within +/- 2
# days of each voyage's last booking date (each voyage counts itself).
total_data = pd.read_excel('test.xlsx')
group_ = total_data.groupby('SVVD')
SVVD_inf = group_['WBL_AUD_DT'].max().sort_values()
SVVD_inf = pd.DataFrame(SVVD_inf)
SVVD_inf.to_csv('SVVD_inf.csv')
del SVVD_inf
SVVD_inf = pd.read_csv('SVVD_inf.csv')
SVVD_inf['WBL_AUD_DT'] = pd.to_datetime(SVVD_inf['WBL_AUD_DT'])
req_day = datetime.timedelta(days=2)
SVVD_inf['compete'] = 0
compete_factor = []
for i in range(len(SVVD_inf)):
    former_two_day = SVVD_inf.iloc[i]['WBL_AUD_DT'] - req_day
    latter_two_day = SVVD_inf.iloc[i]['WBL_AUD_DT'] + req_day
    for j in range(len(SVVD_inf)):
        if ((SVVD_inf.iloc[j]['WBL_AUD_DT'] >= former_two_day)
                and (SVVD_inf.iloc[j]['WBL_AUD_DT'] <= latter_two_day)):
            SVVD_inf.iloc[i, 2] = SVVD_inf.iloc[i, 2] + 1
    if SVVD_inf.iloc[i]['SVVD'] in svvd_name:
        compete_factor.append((SVVD_inf.iloc[i]['SVVD'], SVVD_inf.iloc[i]['compete']))
print(SVVD_inf)

# Fewer than 5 competitors -> positive premium, more -> discount.
compete_delta_price = []
for svvd, competitors in compete_factor:
    compete_delta_price.append((svvd, (math.log(5, 10) - math.log(competitors, 10)) * 50))
print(compete_delta_price)


def delta_price(i):
    """Premium that grows with the cumulative load factor `i` (in percent)."""
    if i < 40:
        return 0
    if i >= 40 and i <= 70:
        return math.pow(9, (i - 40) / 15) - 1
    if i > 70:
        return 80


# Apply the new pricing voyage by voyage; column 19 is 'acc_price'.
for k in range(len(newlist)):
    acc_price = 0
    compete_price = 0
    for svvd, price in compete_delta_price:
        if newlist[k].iloc[0]['SVVD'] == svvd:
            compete_price = price
            break
    for i in range(len(new_day_list)):
        print('i=' + str(i))
        first_day = new_day_list[i][0]
        last_day = new_day_list[i][1]
        flag = 0
        for j in range(len(newlist[k])):
            if ((newlist[k].iloc[j]['day'] >= first_day)
                    and (newlist[k].iloc[j]['day'] < last_day)
                    and (newlist[k].iloc[j]['ordinary'] == 1)):
                # Drop the price once per window while the ship is under 30% full.
                if newlist[k].iloc[j]['acc_rate'] < 30:
                    if flag == 0:
                        flag = 1
                        acc_price -= 10 * math.exp(i)
                newlist[k].iloc[j, 19] += acc_price
    for j_ in range(len(newlist[k])):
        if newlist[k].iloc[j_]['ordinary'] == 1:
            newlist[k].iloc[j_, 19] += delta_price(newlist[k].iloc[j_]['acc_rate'])
            if newlist[k].iloc[j_]['day'] >= 2:
                newlist[k].iloc[j_, 19] += compete_price
    newlist[k]['new_AMT1'] = newlist[k]['AMT'] + newlist[k]['acc_price']

for i in range(len(newlist)):
    newlist[i].to_csv('newvoyage' + str(i + 1) + '-3.csv')

raw_r = 0
new_r = 0
for voyage in newlist:
    print('revenue:' + str(voyage['AMT'].sum()))
    raw_r = raw_r + voyage['AMT'].sum()
    print('newrevenue:' + str(voyage['new_AMT1'].sum()))
    new_r = new_r + voyage['new_AMT1'].sum()
print('total revenue:' + str(raw_r))
print('total newrevenue:' + str(new_r))
0.031574
0.102574
import time

from owlracer.env import Env as Owlracer_Env
from owlracer import owlParser


def calculate_action(step_result, list):
    """Pick the next action from the latest sensor readings.

    `list` (sic — kept for interface compatibility) carries the per-episode
    counters 'fixed_left' / 'fixed_right' that force a turn for a number of
    steps after a large side opening was seen.
    """
    right = step_result.distance.right
    front_right = step_result.distance.frontRight
    left = step_result.distance.left
    front_left = step_result.distance.frontLeft

    # A forced turn is in progress: accelerate first, then steer.
    if list["fixed_left"] > 0:
        list["fixed_left"] -= 1
        return 2 if list["fixed_left"] > 30 else 3
    elif list["fixed_right"] > 0:
        list["fixed_right"] -= 1
        if list["fixed_right"] > 30:
            return 2
        return 4
    # A wide opening on one side triggers a new forced turn.
    elif left > 200 and list["fixed_left"] == 0:
        list["fixed_left"] = 80
        print("distance left big!")
        return 2
    elif right > 200 and list["fixed_right"] == 0:
        list["fixed_right"] = 80
        # NOTE(review): the original message says "left" on this right-hand
        # branch too — preserved verbatim.
        print("distance left big!")
        return 2
    else:
        # Steer toward whichever diagonal has more room.
        if front_left == 0:
            ratio = front_right / (front_left + 0.00001)
        else:
            ratio = float(front_right) / front_left
        if step_result.distance.front >= 50:
            if ratio < 1:
                return 3
            elif ratio > 1:
                return 4
            else:
                return 1
        else:
            if ratio < 1:
                return 5
            elif ratio > 1:
                return 6
            else:
                return 2


@owlParser
def main_loop(args):
    """Drive the rule-based car until interrupted."""
    env = Owlracer_Env(ip=args.ip, port=args.port, spectator=args.spectator,
                       session=args.session, carName="Rule-based (Py)",
                       carColor="#07f036")
    step_result = env.step(0)
    turn_state = {
        "fixed_left": 0,
        "fixed_right": 0
    }

    while True:
        # waiting for game to start
        while env.isPrerace or env.isPaused:
            env.updateSession()
            time.sleep(0.1)

        action = calculate_action(step_result, turn_state)
        step_result = env.step(action)
        print("Car Left/right: {} {}, Vel: {} forward distance {}".format(
            step_result.distance.left, step_result.distance.right,
            step_result.velocity, step_result.distance.front))
        # sleep for human
        time.sleep(0.01)


if __name__ == '__main__':
    main_loop()
examples/RuleBasedEngine.py
import time

from owlracer.env import Env as Owlracer_Env
from owlracer import owlParser


def calculate_action(step_result, list):
    """Pick the next action from the latest sensor readings.

    `list` (sic — kept for interface compatibility) carries the per-episode
    counters 'fixed_left' / 'fixed_right' that force a turn for a number of
    steps after a large side opening was seen.
    """
    right = step_result.distance.right
    front_right = step_result.distance.frontRight
    left = step_result.distance.left
    front_left = step_result.distance.frontLeft

    # A forced turn is in progress: accelerate first, then steer.
    if list["fixed_left"] > 0:
        list["fixed_left"] -= 1
        return 2 if list["fixed_left"] > 30 else 3
    elif list["fixed_right"] > 0:
        list["fixed_right"] -= 1
        if list["fixed_right"] > 30:
            return 2
        return 4
    # A wide opening on one side triggers a new forced turn.
    elif left > 200 and list["fixed_left"] == 0:
        list["fixed_left"] = 80
        print("distance left big!")
        return 2
    elif right > 200 and list["fixed_right"] == 0:
        list["fixed_right"] = 80
        # NOTE(review): the original message says "left" on this right-hand
        # branch too — preserved verbatim.
        print("distance left big!")
        return 2
    else:
        # Steer toward whichever diagonal has more room.
        if front_left == 0:
            ratio = front_right / (front_left + 0.00001)
        else:
            ratio = float(front_right) / front_left
        if step_result.distance.front >= 50:
            if ratio < 1:
                return 3
            elif ratio > 1:
                return 4
            else:
                return 1
        else:
            if ratio < 1:
                return 5
            elif ratio > 1:
                return 6
            else:
                return 2


@owlParser
def main_loop(args):
    """Drive the rule-based car until interrupted."""
    env = Owlracer_Env(ip=args.ip, port=args.port, spectator=args.spectator,
                       session=args.session, carName="Rule-based (Py)",
                       carColor="#07f036")
    step_result = env.step(0)
    turn_state = {
        "fixed_left": 0,
        "fixed_right": 0
    }

    while True:
        # waiting for game to start
        while env.isPrerace or env.isPaused:
            env.updateSession()
            time.sleep(0.1)

        action = calculate_action(step_result, turn_state)
        step_result = env.step(action)
        print("Car Left/right: {} {}, Vel: {} forward distance {}".format(
            step_result.distance.left, step_result.distance.right,
            step_result.velocity, step_result.distance.front))
        # sleep for human
        time.sleep(0.01)


if __name__ == '__main__':
    main_loop()
0.408985
0.284806
import argparse
import ast
import json
import os
import re
import sys

import tqdm

from .simpledicomanonymizer import *


def anonymize(inputPath, outputPath, anonymizationActions):
    """Anonymize one DICOM file or every file in a folder.

    :param inputPath: path to a DICOM file, or to a folder of DICOM files
    :param outputPath: output file path (file input) or output folder
    :param anonymizationActions: dict mapping DICOM tags to action callables
    """
    # Detect whether we were given files or folders.
    InputFolder = ''
    OutputFolder = ''
    if os.path.isdir(inputPath):
        InputFolder = inputPath
    if os.path.isdir(outputPath):
        OutputFolder = outputPath

    if InputFolder == '' and OutputFolder != '':
        # Single input file into an output folder: keep the input's name.
        # Fix: os.path.join instead of bare concatenation (which dropped the
        # path separator), and only when the output really is a folder —
        # previously a file->file call clobbered the user's output path with
        # the input's basename.
        outputPath = os.path.join(OutputFolder, os.path.basename(inputPath))

    if InputFolder != '' and OutputFolder == '':
        print('Error, please set a correct output folder path')
        sys.exit()

    # Build the matching (input, output) file lists.
    inputFilesList = []
    outputFilesList = []
    if InputFolder == '':
        inputFilesList.append(inputPath)
        outputFilesList.append(outputPath)
    else:
        for fileName in os.listdir(InputFolder):
            inputFilesList.append(InputFolder + '/' + fileName)
            outputFilesList.append(OutputFolder + '/' + fileName)

    progressBar = tqdm.tqdm(total=len(inputFilesList))
    for inputFile, outputFile in zip(inputFilesList, outputFilesList):
        anonymizeDICOMFile(inputFile, outputFile, anonymizationActions)
        progressBar.update(1)
    progressBar.close()


def generateActionsDictionary(mapActionTag, definedActionMap = {}):
    """Translate a {tag: action-name-or-callable} map into a generateActions() map.

    :param mapActionTag: dict mapping a tag tuple to an action name or callable
    :param definedActionMap: extra user-defined actions, looked up before eval
    """
    generatedMap = {}
    for tag, action in mapActionTag.items():
        # Resolve the action to a callable: already callable, user-defined,
        # or — last resort — a module-level function found by name.
        if callable(action):
            actionFunction = action
        else:
            # SECURITY: eval() resolves arbitrary expressions; only pass
            # trusted action names here.
            actionFunction = definedActionMap[action] if action in definedActionMap else eval(action)
        generatedMap.update(generateActions([tag], actionFunction))
    return generatedMap


def main(definedActionMap = {}):
    """Command-line entry point: parse arguments and run the anonymization."""
    parser = argparse.ArgumentParser(add_help=True)
    parser.add_argument('input',
                        help='Path to the input dicom file or input directory which contains dicom files')
    parser.add_argument('output',
                        help='Path to the output dicom file or output directory which will contains dicom files')
    parser.add_argument('-t', action='append', nargs='*',
                        help='tags action : Defines a new action to apply on the tag.'\
                        '\'regexp\' action takes two arguments: '\
                        '1. regexp to find substring '\
                        '2. the string that will replace the previous found string')
    parser.add_argument('--dictionary', action='store',
                        help='File which contains a dictionary that can be added to the original one')
    args = parser.parse_args()

    InputPath = args.input
    OutputPath = args.output

    # Create a new actions' dictionary from parameters.
    newAnonymizationActions = {}
    if args.t:
        for currentTagParameters in args.t:
            nbParameters = len(currentTagParameters)
            if nbParameters == 0:
                continue

            options = None
            actionName = currentTagParameters[1]
            # regexp mode carries two extra arguments: find + replace.
            if nbParameters == 4:
                options = {
                    "find": currentTagParameters[2],
                    "replace": currentTagParameters[3]
                }

            tagsList = [ast.literal_eval(currentTagParameters[0])]
            # SECURITY: action names from the command line are resolved with
            # eval(); only run this tool with trusted arguments.
            action = eval(actionName)
            # With options we pass the action *name* so that generateActions
            # builds a new method from it (the regexp case) instead of
            # calling the resolved function directly.
            if options is not None:
                action = actionName
            newAnonymizationActions.update(generateActions(tagsList, action, options))

    # Merge an optional user-provided {tag: action} dictionary file.
    if args.dictionary:
        with open(args.dictionary) as json_file:
            data = json.load(json_file)
        for key, value in data.items():
            actionName = value
            options = None
            if type(value) is dict:
                actionName = value['action']
                options = {
                    "find": value['find'],
                    "replace": value['replace']
                }
            tagsList = [ast.literal_eval(key)]
            # SECURITY: same eval() caveat as above for unknown action names.
            action = definedActionMap[actionName] if actionName in definedActionMap else eval(actionName)
            newAnonymizationActions.update(generateActions(tagsList, action, options))

    # Launch the anonymization.
    anonymize(InputPath, OutputPath, newAnonymizationActions)
dicomanonymizer/anonymizer.py
import argparse import ast import json import os import re import tqdm from .simpledicomanonymizer import * def anonymize(inputPath, outputPath, anonymizationActions): # Get input arguments InputFolder = '' OutputFolder = '' if os.path.isdir(inputPath): InputFolder = inputPath if os.path.isdir(outputPath): OutputFolder = outputPath if InputFolder == '': outputPath = OutputFolder + os.path.basename(inputPath) if InputFolder != '' and OutputFolder == '': print('Error, please set a correct output folder path') sys.exit() # Generate list of input file if a folder has been set inputFilesList = [] outputFilesList = [] if InputFolder == '': inputFilesList.append(inputPath) outputFilesList.append(outputPath) else: files = os.listdir(InputFolder) for fileName in files: inputFilesList.append(InputFolder + '/' + fileName) outputFilesList.append(OutputFolder + '/' + fileName) progressBar = tqdm.tqdm(total=len(inputFilesList)) for cpt in range(len(inputFilesList)): anonymizeDICOMFile(inputFilesList[cpt], outputFilesList[cpt], anonymizationActions) progressBar.update(1) progressBar.close() def generateActionsDictionary(mapActionTag, definedActionMap = {}): generatedMap = {} cpt = 0 for tag in mapActionTag: test = [tag] action = mapActionTag[tag] # Define the associated function to the tag if callable(action): actionFunction = action else: actionFunction = definedActionMap[action] if action in definedActionMap else eval(action) # Generate the map if cpt == 0: generatedMap = generateActions(test, actionFunction) else: generatedMap.update(generateActions(test, actionFunction)) cpt += 1 return generatedMap def main(definedActionMap = {}): parser = argparse.ArgumentParser(add_help=True) parser.add_argument('input', help='Path to the input dicom file or input directory which contains dicom files') parser.add_argument('output', help='Path to the output dicom file or output directory which will contains dicom files') parser.add_argument('-t', action='append', nargs='*', help='tags 
action : Defines a new action to apply on the tag.'\ '\'regexp\' action takes two arguments: '\ '1. regexp to find substring '\ '2. the string that will replace the previous found string') parser.add_argument('--dictionary', action='store', help='File which contains a dictionary that can be added to the original one') args = parser.parse_args() InputPath = args.input OutputPath = args.output # Create a new actions' dictionary from parameters newAnonymizationActions = {} cpt = 0 if args.t: numberOfNewTagsActions = len(args.t) if numberOfNewTagsActions > 0: for i in range(numberOfNewTagsActions): currentTagParameters = args.t[i] nbParameters = len(currentTagParameters) if nbParameters == 0: continue options = None actionName = currentTagParameters[1] # Means that we are in regexp mode if nbParameters == 4: options = { "find": currentTagParameters[2], "replace": currentTagParameters[3] } tagsList = [ast.literal_eval(currentTagParameters[0])] action = eval(actionName) # When generateActions is called and we have options, we don't want use regexp # as an action but we want to call it to generate a new method if options is not None: action = actionName if cpt == 0: newAnonymizationActions = generateActions(tagsList, action, options) else: newAnonymizationActions.update(generateActions(tagsList, action, options)) cpt += 1 # Read an existing dictionary if args.dictionary: with open(args.dictionary) as json_file: data = json.load(json_file) for key, value in data.items(): actionName = value options = None if type(value) is dict: actionName = value['action'] options = { "find": value['find'], "replace" : value['replace'] } l = [ast.literal_eval(key)] action = definedActionMap[actionName] if actionName in definedActionMap else eval(actionName) if cpt == 0: newAnonymizationActions = generateActions(l, action, options) else: newAnonymizationActions.update(generateActions(l, action, options)) cpt += 1 # Launch the anonymization anonymize(InputPath, OutputPath, 
newAnonymizationActions)
0.162579
0.18321
from djmodels.core.exceptions import ObjectDoesNotExist from djmodels.db.models import signals from djmodels.db.models.aggregates import * # NOQA from djmodels.db.models.aggregates import __all__ as aggregates_all from djmodels.db.models.constraints import * # NOQA from djmodels.db.models.constraints import __all__ as constraints_all from djmodels.db.models.deletion import ( CASCADE, DO_NOTHING, PROTECT, SET, SET_DEFAULT, SET_NULL, ProtectedError, ) from djmodels.db.models.expressions import ( Case, Exists, Expression, ExpressionList, ExpressionWrapper, F, Func, OuterRef, RowRange, Subquery, Value, ValueRange, When, Window, WindowFrame, ) from djmodels.db.models.fields import * # NOQA from djmodels.db.models.fields import __all__ as fields_all from djmodels.db.models.fields.files import FileField, ImageField from djmodels.db.models.fields.proxy import OrderWrt from djmodels.db.models.indexes import * # NOQA from djmodels.db.models.indexes import __all__ as indexes_all from djmodels.db.models.lookups import Lookup, Transform from djmodels.db.models.manager import Manager from djmodels.db.models.query import ( Prefetch, Q, QuerySet, prefetch_related_objects, ) from djmodels.db.models.query_utils import FilteredRelation # Imports that would create circular imports if sorted from djmodels.db.models.base import DEFERRED, Model # isort:skip from djmodels.db.models.fields.related import ( # isort:skip ForeignKey, ForeignObject, OneToOneField, ManyToManyField, ManyToOneRel, ManyToManyRel, OneToOneRel, ) __all__ = aggregates_all + constraints_all + fields_all + indexes_all __all__ += [ 'ObjectDoesNotExist', 'signals', 'CASCADE', 'DO_NOTHING', 'PROTECT', 'SET', 'SET_DEFAULT', 'SET_NULL', 'ProtectedError', 'Case', 'Exists', 'Expression', 'ExpressionList', 'ExpressionWrapper', 'F', 'Func', 'OuterRef', 'RowRange', 'Subquery', 'Value', 'ValueRange', 'When', 'Window', 'WindowFrame', 'FileField', 'ImageField', 'OrderWrt', 'Lookup', 'Transform', 'Manager', 'Prefetch', 'Q', 
'QuerySet', 'prefetch_related_objects', 'DEFERRED', 'Model', 'FilteredRelation', 'ForeignKey', 'ForeignObject', 'OneToOneField', 'ManyToManyField', 'ManyToOneRel', 'ManyToManyRel', 'OneToOneRel', ]
djmodels/db/models/__init__.py
from djmodels.core.exceptions import ObjectDoesNotExist from djmodels.db.models import signals from djmodels.db.models.aggregates import * # NOQA from djmodels.db.models.aggregates import __all__ as aggregates_all from djmodels.db.models.constraints import * # NOQA from djmodels.db.models.constraints import __all__ as constraints_all from djmodels.db.models.deletion import ( CASCADE, DO_NOTHING, PROTECT, SET, SET_DEFAULT, SET_NULL, ProtectedError, ) from djmodels.db.models.expressions import ( Case, Exists, Expression, ExpressionList, ExpressionWrapper, F, Func, OuterRef, RowRange, Subquery, Value, ValueRange, When, Window, WindowFrame, ) from djmodels.db.models.fields import * # NOQA from djmodels.db.models.fields import __all__ as fields_all from djmodels.db.models.fields.files import FileField, ImageField from djmodels.db.models.fields.proxy import OrderWrt from djmodels.db.models.indexes import * # NOQA from djmodels.db.models.indexes import __all__ as indexes_all from djmodels.db.models.lookups import Lookup, Transform from djmodels.db.models.manager import Manager from djmodels.db.models.query import ( Prefetch, Q, QuerySet, prefetch_related_objects, ) from djmodels.db.models.query_utils import FilteredRelation # Imports that would create circular imports if sorted from djmodels.db.models.base import DEFERRED, Model # isort:skip from djmodels.db.models.fields.related import ( # isort:skip ForeignKey, ForeignObject, OneToOneField, ManyToManyField, ManyToOneRel, ManyToManyRel, OneToOneRel, ) __all__ = aggregates_all + constraints_all + fields_all + indexes_all __all__ += [ 'ObjectDoesNotExist', 'signals', 'CASCADE', 'DO_NOTHING', 'PROTECT', 'SET', 'SET_DEFAULT', 'SET_NULL', 'ProtectedError', 'Case', 'Exists', 'Expression', 'ExpressionList', 'ExpressionWrapper', 'F', 'Func', 'OuterRef', 'RowRange', 'Subquery', 'Value', 'ValueRange', 'When', 'Window', 'WindowFrame', 'FileField', 'ImageField', 'OrderWrt', 'Lookup', 'Transform', 'Manager', 'Prefetch', 'Q', 
'QuerySet', 'prefetch_related_objects', 'DEFERRED', 'Model', 'FilteredRelation', 'ForeignKey', 'ForeignObject', 'OneToOneField', 'ManyToManyField', 'ManyToOneRel', 'ManyToManyRel', 'OneToOneRel', ]
0.638272
0.089335
import os import cv2 import sys import time import math import getopt import numpy as np import tensorflow as tf import tensorflow.keras.backend as K from utils import * from glob import glob from parser_test import parser from TrackNet import ResNet_Track from focal_loss import BinaryFocalLoss from collections import deque from tensorflow import keras args = parser.parse_args() tol = args.tol mag = args.mag sigma = args.sigma HEIGHT = args.HEIGHT WIDTH = args.WIDTH BATCH_SIZE = 1 FRAME_STACK = args.frame_stack load_weights = args.load_weights video_path = args.video_path csv_path = args.label_path opt = keras.optimizers.Adadelta(learning_rate=1.0) model=ResNet_Track(input_shape=(3, HEIGHT, WIDTH)) model.compile(loss=BinaryFocalLoss(gamma=2), optimizer=opt, metrics=[keras.metrics.BinaryAccuracy()]) try: model.load_weights(load_weights) print("Load weights successfully") except: print("Fail to load weights, please modify path in parser.py --load_weights") if not os.path.isfile(video_path) or not video_path.endswith('.mp4'): print("Not a valid video path! 
Please modify path in parser.py --video_path") sys.exit(1) else: # acquire video info cap = cv2.VideoCapture(video_path) fps = int(cap.get(cv2.CAP_PROP_FPS)) n_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) video_name = os.path.split(video_path)[-1][:-4] if not os.path.isfile(csv_path) and not csv_path.endswith('.csv'): compute = False info = { idx:{ 'Frame': idx, 'Ball': 0, 'x': -1, 'y': -1 } for idx in range(n_frames) } print("Predict only, will not calculate accurracy") else: compute = True info = load_info(csv_path) if len(info) != n_frames: print("Number of frames in video and dictionary are not the same!") print("Fail to load, predict only.") compute = False info = { idx:{ 'Frame': idx, 'Ball': 0, 'x': -1, 'y': -1 } for idx in range(n_frames) } else: print("Load csv file successfully") print('Beginning predicting......') # img_input initialization gray_imgs = deque() success, image = cap.read() ratio = image.shape[0] / HEIGHT size = (int(WIDTH*ratio), int(HEIGHT*ratio)) fourcc = cv2.VideoWriter_fourcc(*'mp4v') out = cv2.VideoWriter(video_name+'_predict.mp4', fourcc, fps, size) out.write(image) # create a ball location CSV ball_loc = open(video_name + '_predict.csv', 'w') ball_loc.write('Frame, X, Y\n') img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) img = np.expand_dims(img, axis=2) gray_imgs.append(img) for _ in range(FRAME_STACK-1): success, image = cap.read() out.write(image) img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) img = np.expand_dims(img, axis=2) gray_imgs.append(img) frame_no = FRAME_STACK-1 time_list=[] TP = TN = FP1 = FP2 = FN = 0 while success: if frame_no == n_frames: break img_input = np.concatenate(gray_imgs, axis=2) img_input = cv2.resize(img_input, (WIDTH, HEIGHT)) img_input = np.moveaxis(img_input, -1, 0) img_input = np.expand_dims(img_input, axis=0) img_input = img_input.astype('float')/255. 
start = time.time() y_pred = model.predict(img_input, batch_size=BATCH_SIZE) end = time.time() time_list.append(end-start) y_pred = y_pred > 0.5 y_pred = y_pred.astype('float32') y_true = [] if info[frame_no]['Ball'] == 0: y_true.append(genHeatMap(WIDTH, HEIGHT, -1, -1, sigma, mag)) else: y_true.append(genHeatMap(WIDTH, HEIGHT, int(info[frame_no]['x']/ratio), int(info[frame_no]['y']/ratio), sigma, mag)) tp, tn, fp1, fp2, fn = confusion(y_pred, y_true, tol) TP += tp TN += tn FP1 += fp1 FP2 += fp2 FN += fn h_pred = y_pred[0]*255 h_pred = h_pred.astype('uint8') if np.amax(h_pred) <= 0: out.write(image) ball_loc.write('%d,,\n' % (frame_no)) else: # _, cnts, _ = cv2.findContours(h_pred[0].copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) cnts, _ = cv2.findContours(h_pred[0].copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) rects = [cv2.boundingRect(ctr) for ctr in cnts] max_area_idx = 0 max_area = rects[max_area_idx][2] * rects[max_area_idx][3] for i in range(1, len(rects)): area = rects[i][2] * rects[i][3] if area > max_area: max_area_idx = i max_area = area target = rects[max_area_idx] (cx_pred, cy_pred) = (int(ratio*(target[0] + target[2] / 2)), int(ratio*(target[1] + target[3] / 2))) image_cp = np.copy(image) cv2.circle(image_cp, (cx_pred, cy_pred), 5, (0,0,255), -1) out.write(image_cp) ball_loc.write('%d, %d, %d\n' % (frame_no, cx_pred, cy_pred)) success, image = cap.read() if success: img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) img = np.expand_dims(img, axis=2) gray_imgs.append(img) gray_imgs.popleft() frame_no += 1 out.release() ball_loc.close() total_time = sum(time_list) if compute: print('==========================================================') accuracy, precision, recall = compute_acc((TP, TN, FP1, FP2, FN)) avg_acc = (accuracy + precision + recall)/3 print("Number of true positive:", TP) print("Number of true negative:", TN) print("Number of false positive FP1:", FP1) print("Number of false positive FP2:", FP2) print("Number of false negative:", 
FN) print("Accuracy:", accuracy) print("Precision:", precision) print("Recall:", recall) print("Total Time:", total_time) print('(ACC + Pre + Rec)/3:', avg_acc) print('Done......')
predict.py
import os import cv2 import sys import time import math import getopt import numpy as np import tensorflow as tf import tensorflow.keras.backend as K from utils import * from glob import glob from parser_test import parser from TrackNet import ResNet_Track from focal_loss import BinaryFocalLoss from collections import deque from tensorflow import keras args = parser.parse_args() tol = args.tol mag = args.mag sigma = args.sigma HEIGHT = args.HEIGHT WIDTH = args.WIDTH BATCH_SIZE = 1 FRAME_STACK = args.frame_stack load_weights = args.load_weights video_path = args.video_path csv_path = args.label_path opt = keras.optimizers.Adadelta(learning_rate=1.0) model=ResNet_Track(input_shape=(3, HEIGHT, WIDTH)) model.compile(loss=BinaryFocalLoss(gamma=2), optimizer=opt, metrics=[keras.metrics.BinaryAccuracy()]) try: model.load_weights(load_weights) print("Load weights successfully") except: print("Fail to load weights, please modify path in parser.py --load_weights") if not os.path.isfile(video_path) or not video_path.endswith('.mp4'): print("Not a valid video path! 
Please modify path in parser.py --video_path") sys.exit(1) else: # acquire video info cap = cv2.VideoCapture(video_path) fps = int(cap.get(cv2.CAP_PROP_FPS)) n_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) video_name = os.path.split(video_path)[-1][:-4] if not os.path.isfile(csv_path) and not csv_path.endswith('.csv'): compute = False info = { idx:{ 'Frame': idx, 'Ball': 0, 'x': -1, 'y': -1 } for idx in range(n_frames) } print("Predict only, will not calculate accurracy") else: compute = True info = load_info(csv_path) if len(info) != n_frames: print("Number of frames in video and dictionary are not the same!") print("Fail to load, predict only.") compute = False info = { idx:{ 'Frame': idx, 'Ball': 0, 'x': -1, 'y': -1 } for idx in range(n_frames) } else: print("Load csv file successfully") print('Beginning predicting......') # img_input initialization gray_imgs = deque() success, image = cap.read() ratio = image.shape[0] / HEIGHT size = (int(WIDTH*ratio), int(HEIGHT*ratio)) fourcc = cv2.VideoWriter_fourcc(*'mp4v') out = cv2.VideoWriter(video_name+'_predict.mp4', fourcc, fps, size) out.write(image) # create a ball location CSV ball_loc = open(video_name + '_predict.csv', 'w') ball_loc.write('Frame, X, Y\n') img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) img = np.expand_dims(img, axis=2) gray_imgs.append(img) for _ in range(FRAME_STACK-1): success, image = cap.read() out.write(image) img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) img = np.expand_dims(img, axis=2) gray_imgs.append(img) frame_no = FRAME_STACK-1 time_list=[] TP = TN = FP1 = FP2 = FN = 0 while success: if frame_no == n_frames: break img_input = np.concatenate(gray_imgs, axis=2) img_input = cv2.resize(img_input, (WIDTH, HEIGHT)) img_input = np.moveaxis(img_input, -1, 0) img_input = np.expand_dims(img_input, axis=0) img_input = img_input.astype('float')/255. 
start = time.time() y_pred = model.predict(img_input, batch_size=BATCH_SIZE) end = time.time() time_list.append(end-start) y_pred = y_pred > 0.5 y_pred = y_pred.astype('float32') y_true = [] if info[frame_no]['Ball'] == 0: y_true.append(genHeatMap(WIDTH, HEIGHT, -1, -1, sigma, mag)) else: y_true.append(genHeatMap(WIDTH, HEIGHT, int(info[frame_no]['x']/ratio), int(info[frame_no]['y']/ratio), sigma, mag)) tp, tn, fp1, fp2, fn = confusion(y_pred, y_true, tol) TP += tp TN += tn FP1 += fp1 FP2 += fp2 FN += fn h_pred = y_pred[0]*255 h_pred = h_pred.astype('uint8') if np.amax(h_pred) <= 0: out.write(image) ball_loc.write('%d,,\n' % (frame_no)) else: # _, cnts, _ = cv2.findContours(h_pred[0].copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) cnts, _ = cv2.findContours(h_pred[0].copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) rects = [cv2.boundingRect(ctr) for ctr in cnts] max_area_idx = 0 max_area = rects[max_area_idx][2] * rects[max_area_idx][3] for i in range(1, len(rects)): area = rects[i][2] * rects[i][3] if area > max_area: max_area_idx = i max_area = area target = rects[max_area_idx] (cx_pred, cy_pred) = (int(ratio*(target[0] + target[2] / 2)), int(ratio*(target[1] + target[3] / 2))) image_cp = np.copy(image) cv2.circle(image_cp, (cx_pred, cy_pred), 5, (0,0,255), -1) out.write(image_cp) ball_loc.write('%d, %d, %d\n' % (frame_no, cx_pred, cy_pred)) success, image = cap.read() if success: img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) img = np.expand_dims(img, axis=2) gray_imgs.append(img) gray_imgs.popleft() frame_no += 1 out.release() ball_loc.close() total_time = sum(time_list) if compute: print('==========================================================') accuracy, precision, recall = compute_acc((TP, TN, FP1, FP2, FN)) avg_acc = (accuracy + precision + recall)/3 print("Number of true positive:", TP) print("Number of true negative:", TN) print("Number of false positive FP1:", FP1) print("Number of false positive FP2:", FP2) print("Number of false negative:", 
FN) print("Accuracy:", accuracy) print("Precision:", precision) print("Recall:", recall) print("Total Time:", total_time) print('(ACC + Pre + Rec)/3:', avg_acc) print('Done......')
0.242206
0.139396
import os from sourcefile import SourceFile import argparse def main(url): files = [] for root, directories, filenames in os.walk(url): for filename in filenames: file = SourceFile(os.path.join(root, filename)) files.append(file) try: print("Parsing " + file.fullpath) file.parse() if len(file.profanewords) > 0: for index, word in enumerate(file.profanewords): print("Line " + str(file.profanelines[index] + 1) + ": " + word) print("Found " + str(len(file.profanewords)) + " words for a score of " + str(file.profanityscore)) print() except Exception as ex: print("Failed to parse file: ", ex) # Calculate and display statistics mostprofanefile = max(files, key=lambda curfile: len(curfile.profanewords)) from collections import Counter mostprofanewords = [] for file in files: word = file.favoriteprofaneword() if word is not None: mostprofanewords.append(word) if len(mostprofanewords) > 0: profanewords = Counter(mostprofanewords) mostcommonprofaneword = [elem[0] for elem in profanewords.most_common(1)][0] else: mostcommonprofaneword = "N/A" print() print("Total files scanned: " + str(len(files))) print("Words found: " + str(sum(file.profanewordcount[1] for file in files)) + " Mild, " + str(sum(file.profanewordcount[2] for file in files)) + " Medium, " + str(sum(file.profanewordcount[3] for file in files)) + " Strong, " + str(sum(file.profanewordcount[4] for file in files)) + " Very Strong") totalprofanityscore = sum(file.profanityscore for file in files) if totalprofanityscore > 0 : print("Most profane file: " + str(mostprofanefile.fullpath) + " with " + str(len(mostprofanefile.profanewords)) + " words for a score of " + str(mostprofanefile.profanityscore)) print("Most common word: " + mostcommonprofaneword) print("Total score: " + str(totalprofanityscore)) parser = argparse.ArgumentParser(description='Scan a directory for profanity.') parser.add_argument('dir', type=str, nargs=1, help='directory to scan') args = parser.parse_args() main(args.dir[0])
swearscan.py
import os from sourcefile import SourceFile import argparse def main(url): files = [] for root, directories, filenames in os.walk(url): for filename in filenames: file = SourceFile(os.path.join(root, filename)) files.append(file) try: print("Parsing " + file.fullpath) file.parse() if len(file.profanewords) > 0: for index, word in enumerate(file.profanewords): print("Line " + str(file.profanelines[index] + 1) + ": " + word) print("Found " + str(len(file.profanewords)) + " words for a score of " + str(file.profanityscore)) print() except Exception as ex: print("Failed to parse file: ", ex) # Calculate and display statistics mostprofanefile = max(files, key=lambda curfile: len(curfile.profanewords)) from collections import Counter mostprofanewords = [] for file in files: word = file.favoriteprofaneword() if word is not None: mostprofanewords.append(word) if len(mostprofanewords) > 0: profanewords = Counter(mostprofanewords) mostcommonprofaneword = [elem[0] for elem in profanewords.most_common(1)][0] else: mostcommonprofaneword = "N/A" print() print("Total files scanned: " + str(len(files))) print("Words found: " + str(sum(file.profanewordcount[1] for file in files)) + " Mild, " + str(sum(file.profanewordcount[2] for file in files)) + " Medium, " + str(sum(file.profanewordcount[3] for file in files)) + " Strong, " + str(sum(file.profanewordcount[4] for file in files)) + " Very Strong") totalprofanityscore = sum(file.profanityscore for file in files) if totalprofanityscore > 0 : print("Most profane file: " + str(mostprofanefile.fullpath) + " with " + str(len(mostprofanefile.profanewords)) + " words for a score of " + str(mostprofanefile.profanityscore)) print("Most common word: " + mostcommonprofaneword) print("Total score: " + str(totalprofanityscore)) parser = argparse.ArgumentParser(description='Scan a directory for profanity.') parser.add_argument('dir', type=str, nargs=1, help='directory to scan') args = parser.parse_args() main(args.dir[0])
0.201342
0.096663
import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import os import functools from torch.autograd import Variable from util.image_pool import ImagePool from .base_model import BaseModel from . import networks import math class Mapping_Model_with_mask(nn.Module): def __init__(self, nc, mc=64, n_blocks=3, norm="instance", padding_type="reflect", opt=None): super(Mapping_Model_with_mask, self).__init__() norm_layer = networks.get_norm_layer(norm_type=norm) activation = nn.ReLU(True) model = [] tmp_nc = 64 n_up = 4 for i in range(n_up): ic = min(tmp_nc * (2 ** i), mc) oc = min(tmp_nc * (2 ** (i + 1)), mc) model += [nn.Conv2d(ic, oc, 3, 1, 1), norm_layer(oc), activation] self.before_NL = nn.Sequential(*model) if opt.NL_res: self.NL = networks.NonLocalBlock2D_with_mask_Res( mc, mc, opt.NL_fusion_method, opt.correlation_renormalize, opt.softmax_temperature, opt.use_self, opt.cosin_similarity, ) print("You are using NL + Res") model = [] for i in range(n_blocks): model += [ networks.ResnetBlock( mc, padding_type=padding_type, activation=activation, norm_layer=norm_layer, opt=opt, dilation=opt.mapping_net_dilation, ) ] for i in range(n_up - 1): ic = min(64 * (2 ** (4 - i)), mc) oc = min(64 * (2 ** (3 - i)), mc) model += [nn.Conv2d(ic, oc, 3, 1, 1), norm_layer(oc), activation] model += [nn.Conv2d(tmp_nc * 2, tmp_nc, 3, 1, 1)] if opt.feat_dim > 0 and opt.feat_dim < 64: model += [norm_layer(tmp_nc), activation, nn.Conv2d(tmp_nc, opt.feat_dim, 1, 1)] # model += [nn.Conv2d(64, 1, 1, 1, 0)] self.after_NL = nn.Sequential(*model) def forward(self, input, mask): x1 = self.before_NL(input) del input x2 = self.NL(x1, mask) del x1, mask x3 = self.after_NL(x2) del x2 return x3
Global/models/NonLocal_feature_mapping_model.py
import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import os import functools from torch.autograd import Variable from util.image_pool import ImagePool from .base_model import BaseModel from . import networks import math class Mapping_Model_with_mask(nn.Module): def __init__(self, nc, mc=64, n_blocks=3, norm="instance", padding_type="reflect", opt=None): super(Mapping_Model_with_mask, self).__init__() norm_layer = networks.get_norm_layer(norm_type=norm) activation = nn.ReLU(True) model = [] tmp_nc = 64 n_up = 4 for i in range(n_up): ic = min(tmp_nc * (2 ** i), mc) oc = min(tmp_nc * (2 ** (i + 1)), mc) model += [nn.Conv2d(ic, oc, 3, 1, 1), norm_layer(oc), activation] self.before_NL = nn.Sequential(*model) if opt.NL_res: self.NL = networks.NonLocalBlock2D_with_mask_Res( mc, mc, opt.NL_fusion_method, opt.correlation_renormalize, opt.softmax_temperature, opt.use_self, opt.cosin_similarity, ) print("You are using NL + Res") model = [] for i in range(n_blocks): model += [ networks.ResnetBlock( mc, padding_type=padding_type, activation=activation, norm_layer=norm_layer, opt=opt, dilation=opt.mapping_net_dilation, ) ] for i in range(n_up - 1): ic = min(64 * (2 ** (4 - i)), mc) oc = min(64 * (2 ** (3 - i)), mc) model += [nn.Conv2d(ic, oc, 3, 1, 1), norm_layer(oc), activation] model += [nn.Conv2d(tmp_nc * 2, tmp_nc, 3, 1, 1)] if opt.feat_dim > 0 and opt.feat_dim < 64: model += [norm_layer(tmp_nc), activation, nn.Conv2d(tmp_nc, opt.feat_dim, 1, 1)] # model += [nn.Conv2d(64, 1, 1, 1, 0)] self.after_NL = nn.Sequential(*model) def forward(self, input, mask): x1 = self.before_NL(input) del input x2 = self.NL(x1, mask) del x1, mask x3 = self.after_NL(x2) del x2 return x3
0.803212
0.310524
import six from flask_sqlalchemy import SQLAlchemy from flask_inspektor import QueryInspector from tests.base import FakeAppTestCase class QueryInspectBasicTest(FakeAppTestCase): def test_not_initialised_by_default(self): # Bootstrap the extension. QueryInspector(self.app) # Default configuration doesn't allow initialisation. extensions = getattr(self.app, 'extensions') self.assertNotIn('qi', extensions) def test_initialised(self): self.app.config['QUERYINSPECT_ENABLED'] = True QueryInspector(self.app) self.assertTrue(self.app.extensions['qi']) def test_initialised_lazy(self): qi = QueryInspector() self.app.config['QUERYINSPECT_ENABLED'] = True qi.init_app(self.app) self.assertTrue(self.app.extensions['qi']) class QueryInspectTimingTest(FakeAppTestCase): def test_no_queries(self): self.app.config['QUERYINSPECT_ENABLED'] = True self.app.config['QUERYINSPECT_HEADERS'] = True QueryInspector(self.app) headers = self.client.get('/').headers self.assertIn('X-QueryInspector', headers) value = headers['X-QueryInspector'] self.assertIn('count#qi.reads=0', value) self.assertIn('count#qi.writes=0', value) self.assertIn('count#qi.conns=0', value) def test_slow_view(self): self.app.config['QUERYINSPECT_ENABLED'] = True self.app.config['QUERYINSPECT_HEADERS'] = True QueryInspector(self.app) headers = self.client.get('/slow').headers value = headers['X-QueryInspector'] six.assertRegex(self, value, r'measure#qi\.r_time=1[\d]{2}\.[\d]ms') class QueryInspectSQLTest(FakeAppTestCase): def setUp(self): # Fake DB setup for testing ORM-related things. self.db = db = SQLAlchemy(self.app) class TestModel(db.Model): id = db.Column(db.Integer, primary_key=True) foo = db.Column(db.String(8)) self.Model = TestModel db.create_all(app=self.app) def test_db_reads(self): def _read_view(): # Simulate SQL SELECT query. 
self.Model.query.first() return 'read' self.app.add_url_rule('/read', 'read', _read_view) self.app.config['QUERYINSPECT_ENABLED'] = True self.app.config['QUERYINSPECT_HEADERS'] = True QueryInspector(self.app) headers = self.client.get('/read').headers value = headers['X-QueryInspector'] self.assertIn('count#qi.reads=1', value) def test_db_writes(self): def _write_view(): # Simulate SQL INSERT query. m = self.Model(foo='bar') self.db.session.add(m) self.db.session.commit() return 'write' self.app.add_url_rule('/write', 'write', _write_view) self.app.config['QUERYINSPECT_ENABLED'] = True self.app.config['QUERYINSPECT_HEADERS'] = True QueryInspector(self.app) headers = self.client.get('/write').headers value = headers['X-QueryInspector'] self.assertIn('count#qi.writes=1', value)
tests/test_extension.py
import six from flask_sqlalchemy import SQLAlchemy from flask_inspektor import QueryInspector from tests.base import FakeAppTestCase class QueryInspectBasicTest(FakeAppTestCase): def test_not_initialised_by_default(self): # Bootstrap the extension. QueryInspector(self.app) # Default configuration doesn't allow initialisation. extensions = getattr(self.app, 'extensions') self.assertNotIn('qi', extensions) def test_initialised(self): self.app.config['QUERYINSPECT_ENABLED'] = True QueryInspector(self.app) self.assertTrue(self.app.extensions['qi']) def test_initialised_lazy(self): qi = QueryInspector() self.app.config['QUERYINSPECT_ENABLED'] = True qi.init_app(self.app) self.assertTrue(self.app.extensions['qi']) class QueryInspectTimingTest(FakeAppTestCase): def test_no_queries(self): self.app.config['QUERYINSPECT_ENABLED'] = True self.app.config['QUERYINSPECT_HEADERS'] = True QueryInspector(self.app) headers = self.client.get('/').headers self.assertIn('X-QueryInspector', headers) value = headers['X-QueryInspector'] self.assertIn('count#qi.reads=0', value) self.assertIn('count#qi.writes=0', value) self.assertIn('count#qi.conns=0', value) def test_slow_view(self): self.app.config['QUERYINSPECT_ENABLED'] = True self.app.config['QUERYINSPECT_HEADERS'] = True QueryInspector(self.app) headers = self.client.get('/slow').headers value = headers['X-QueryInspector'] six.assertRegex(self, value, r'measure#qi\.r_time=1[\d]{2}\.[\d]ms') class QueryInspectSQLTest(FakeAppTestCase): def setUp(self): # Fake DB setup for testing ORM-related things. self.db = db = SQLAlchemy(self.app) class TestModel(db.Model): id = db.Column(db.Integer, primary_key=True) foo = db.Column(db.String(8)) self.Model = TestModel db.create_all(app=self.app) def test_db_reads(self): def _read_view(): # Simulate SQL SELECT query. 
self.Model.query.first() return 'read' self.app.add_url_rule('/read', 'read', _read_view) self.app.config['QUERYINSPECT_ENABLED'] = True self.app.config['QUERYINSPECT_HEADERS'] = True QueryInspector(self.app) headers = self.client.get('/read').headers value = headers['X-QueryInspector'] self.assertIn('count#qi.reads=1', value) def test_db_writes(self): def _write_view(): # Simulate SQL INSERT query. m = self.Model(foo='bar') self.db.session.add(m) self.db.session.commit() return 'write' self.app.add_url_rule('/write', 'write', _write_view) self.app.config['QUERYINSPECT_ENABLED'] = True self.app.config['QUERYINSPECT_HEADERS'] = True QueryInspector(self.app) headers = self.client.get('/write').headers value = headers['X-QueryInspector'] self.assertIn('count#qi.writes=1', value)
0.545165
0.287461
import os
import numpy as np
import tensorflow as tf
import dataIO as d
from tqdm import *

'''
Global Parameters
'''
n_epochs = 10
batch_size = 64
g_lr = 0.0025
d_lr = 0.00001
beta = 0.5
alpha_d = 0.0015
alpha_g = 0.000025
d_thresh = 0.8
z_size = 100
obj = 'chair'

train_sample_directory = './train_sample/'
model_directory = './models/'
is_local = True

# Parameter dictionaries shared by generator()/discriminator();
# populated once by initialiseWeights()/initialiseBiases().
weights, biases = {}, {}


def generator(z, batch_size=batch_size, phase_train=True, reuse=False):
    """Map a latent batch z (batch, z_size) to a (batch, 32, 32, 32, 1) voxel
    grid in [0, 1] via a linear projection and three transposed 3-D convs."""
    strides = [1, 2, 2, 2, 1]

    # Project z to a 4x4x4x512 feature volume.
    g_1 = tf.add(tf.matmul(z, weights['wg1']), biases['bg1'])
    g_1 = tf.reshape(g_1, [-1, 4, 4, 4, 512])
    g_1 = tf.contrib.layers.batch_norm(g_1, is_training=phase_train)

    g_2 = tf.nn.conv3d_transpose(g_1, weights['wg2'], output_shape=[batch_size, 8, 8, 8, 256], strides=strides, padding="SAME")
    g_2 = tf.nn.bias_add(g_2, biases['bg2'])
    g_2 = tf.contrib.layers.batch_norm(g_2, is_training=phase_train)
    g_2 = tf.nn.relu(g_2)

    g_3 = tf.nn.conv3d_transpose(g_2, weights['wg3'], output_shape=[batch_size, 16, 16, 16, 128], strides=strides, padding="SAME")
    g_3 = tf.nn.bias_add(g_3, biases['bg3'])
    g_3 = tf.contrib.layers.batch_norm(g_3, is_training=phase_train)
    g_3 = tf.nn.relu(g_3)

    # Final layer: sigmoid occupancy probabilities, no batch norm.
    g_4 = tf.nn.conv3d_transpose(g_3, weights['wg4'], output_shape=[batch_size, 32, 32, 32, 1], strides=strides, padding="SAME")
    g_4 = tf.nn.bias_add(g_4, biases['bg4'])
    g_4 = tf.nn.sigmoid(g_4)

    return g_4


def discriminator(inputs, phase_train=True, reuse=False):
    """Score a (batch, 32, 32, 32, 1) voxel grid; returns unbounded logits
    of shape (batch, 1) (no sigmoid applied here)."""
    strides = [1, 2, 2, 2, 1]

    d_1 = tf.nn.conv3d(inputs, weights['wd1'], strides=strides, padding="SAME")
    d_1 = tf.nn.bias_add(d_1, biases['bd1'])
    d_1 = tf.contrib.layers.batch_norm(d_1, is_training=phase_train)
    d_1 = tf.nn.relu(d_1)

    d_2 = tf.nn.conv3d(d_1, weights['wd2'], strides=strides, padding="SAME")
    d_2 = tf.nn.bias_add(d_2, biases['bd2'])
    d_2 = tf.contrib.layers.batch_norm(d_2, is_training=phase_train)
    d_2 = tf.nn.relu(d_2)

    d_3 = tf.nn.conv3d(d_2, weights['wd3'], strides=strides, padding="SAME")
    d_3 = tf.nn.bias_add(d_3, biases['bd3'])
    d_3 = tf.contrib.layers.batch_norm(d_3, is_training=phase_train)
    d_3 = tf.nn.relu(d_3)

    d_4 = tf.nn.conv3d(d_3, weights['wd4'], strides=strides, padding="SAME")
    d_4 = tf.nn.bias_add(d_4, biases['bd4'])
    d_4 = tf.contrib.layers.batch_norm(d_4, is_training=phase_train)
    d_4 = tf.nn.relu(d_4)

    # Flatten and project to a single score per sample.
    shape = d_4.get_shape().as_list()
    dim = np.prod(shape[1:])
    d_5 = tf.reshape(d_4, shape=[-1, dim])
    d_5 = tf.add(tf.matmul(d_5, weights['wd5']), biases['bd5'])

    return d_5


def initialiseWeights():
    """Create all generator/discriminator weight variables (Xavier init)
    in the module-level `weights` dict and return it."""
    global weights
    xavier_init = tf.contrib.layers.xavier_initializer()

    # filter for deconv3d: A 5-D Tensor with the same type as value and
    # shape [depth, height, width, output_channels, in_channels]
    weights['wg1'] = tf.get_variable("wg1", shape=[z_size, 4*4*4*512], initializer=xavier_init)
    weights['wg2'] = tf.get_variable("wg2", shape=[4, 4, 4, 256, 512], initializer=xavier_init)
    weights['wg3'] = tf.get_variable("wg3", shape=[4, 4, 4, 128, 256], initializer=xavier_init)
    weights['wg4'] = tf.get_variable("wg4", shape=[4, 4, 4, 1, 128], initializer=xavier_init)

    weights['wd1'] = tf.get_variable("wd1", shape=[4, 4, 4, 1, 32], initializer=xavier_init)
    weights['wd2'] = tf.get_variable("wd2", shape=[4, 4, 4, 32, 64], initializer=xavier_init)
    weights['wd3'] = tf.get_variable("wd3", shape=[4, 4, 4, 64, 128], initializer=xavier_init)
    weights['wd4'] = tf.get_variable("wd4", shape=[2, 2, 2, 128, 256], initializer=xavier_init)
    weights['wd5'] = tf.get_variable("wd5", shape=[2*2*2*256, 1], initializer=xavier_init)

    return weights


def initialiseBiases():
    """Create all generator/discriminator bias variables (zero init)
    in the module-level `biases` dict and return it."""
    global biases
    zero_init = tf.zeros_initializer()

    biases['bg1'] = tf.get_variable("bg1", shape=[4*4*4*512], initializer=zero_init)
    biases['bg2'] = tf.get_variable("bg2", shape=[256], initializer=zero_init)
    biases['bg3'] = tf.get_variable("bg3", shape=[128], initializer=zero_init)
    biases['bg4'] = tf.get_variable("bg4", shape=[1], initializer=zero_init)

    biases['bd1'] = tf.get_variable("bd1", shape=[32], initializer=zero_init)
    biases['bd2'] = tf.get_variable("bd2", shape=[64], initializer=zero_init)
    biases['bd3'] = tf.get_variable("bd3", shape=[128], initializer=zero_init)
    biases['bd4'] = tf.get_variable("bd4", shape=[256], initializer=zero_init)
    biases['bd5'] = tf.get_variable("bd5", shape=[1], initializer=zero_init)

    return biases


def trainGAN():
    """Build the GAN graph and run the alternating D/G training loop,
    periodically dumping generated samples and model checkpoints."""
    weights, biases = initialiseWeights(), initialiseBiases()

    z_vector = tf.placeholder(shape=[batch_size, z_size], dtype=tf.float32)
    x_vector = tf.placeholder(shape=[batch_size, 32, 32, 32, 1], dtype=tf.float32)

    net_g_train = generator(z_vector, phase_train=True, reuse=False)

    # Clamp D outputs away from {0, 1} so tf.log never sees 0.
    d_output_x = discriminator(x_vector, phase_train=True, reuse=False)
    d_output_x = tf.maximum(tf.minimum(d_output_x, 0.99), 0.01)
    summary_d_x_hist = tf.summary.histogram("d_prob_x", d_output_x)

    d_output_z = discriminator(net_g_train, phase_train=True, reuse=True)
    d_output_z = tf.maximum(tf.minimum(d_output_z, 0.99), 0.01)
    summary_d_z_hist = tf.summary.histogram("d_prob_z", d_output_z)

    # Standard (non-saturating for D, saturating for G) GAN losses.
    d_loss = -tf.reduce_mean(tf.log(d_output_x) + tf.log(1 - d_output_z))
    summary_d_loss = tf.summary.scalar("d_loss", d_loss)
    g_loss = -tf.reduce_mean(tf.log(d_output_z))
    summary_g_loss = tf.summary.scalar("g_loss", g_loss)

    net_g_test = generator(z_vector, phase_train=True, reuse=True)

    # NOTE(review): selecting trainable variables by position is fragile —
    # it silently breaks if layer construction order changes. Kept as-is.
    para_g = list(np.array(tf.trainable_variables())[[0, 1, 4, 5, 8, 9, 12, 13]])
    para_d = list(np.array(tf.trainable_variables())[[14, 15, 16, 17, 20, 21, 24, 25]])  # ,28,29]]

    # only update the weights for the discriminator network
    optimizer_op_d = tf.train.AdamOptimizer(learning_rate=alpha_d, beta1=beta).minimize(d_loss, var_list=para_d)
    # only update the weights for the generator network
    optimizer_op_g = tf.train.AdamOptimizer(learning_rate=alpha_g, beta1=beta).minimize(g_loss, var_list=para_g)

    saver = tf.train.Saver(max_to_keep=50)

    # Fix: build the merged-summary op ONCE, outside the training loop.
    # The original rebuilt it every epoch, growing the graph each iteration.
    d_summary_merge = tf.summary.merge([summary_d_loss, summary_d_x_hist, summary_d_z_hist])

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # Fixed latent batch so sample dumps are comparable across epochs.
        z_sample = np.random.normal(0, 0.33, size=[batch_size, z_size]).astype(np.float32)
        volumes = d.getAll(obj=obj, train=True, is_local=is_local)
        # np.float is a removed alias for float; float32 matches the placeholder.
        volumes = volumes[..., np.newaxis].astype(np.float32)

        for epoch in tqdm(range(n_epochs)):
            idx = np.random.randint(len(volumes), size=batch_size)
            x = volumes[idx]
            z = np.random.normal(0, 0.33, size=[batch_size, z_size]).astype(np.float32)

            summary_d, discriminator_loss = sess.run([d_summary_merge, d_loss], feed_dict={z_vector: z, x_vector: x})
            summary_g, generator_loss = sess.run([summary_g_loss, g_loss], feed_dict={z_vector: z})

            # Skip updating whichever network is already winning
            # (loss below -ln(0.01) * 0.1 ~= 0.46).
            if discriminator_loss <= 4.6 * 0.1:
                sess.run([optimizer_op_g], feed_dict={z_vector: z})
            elif generator_loss <= 4.6 * 0.1:
                sess.run([optimizer_op_d], feed_dict={z_vector: z, x_vector: x})
            else:
                sess.run([optimizer_op_d], feed_dict={z_vector: z, x_vector: x})
                sess.run([optimizer_op_g], feed_dict={z_vector: z})

            print("epoch: {0}, d_loss: {1}, g_loss: {2}".format(epoch, discriminator_loss, generator_loss))

            # output generated chairs and checkpoint the model
            if epoch % 500 == 10:
                g_chairs = sess.run(net_g_test, feed_dict={z_vector: z_sample})
                if not os.path.exists(train_sample_directory):
                    os.makedirs(train_sample_directory)
                g_chairs.dump(train_sample_directory + '/' + str(epoch))
                if not os.path.exists(model_directory):
                    os.makedirs(model_directory)
                saver.save(sess, save_path=model_directory + '/' + str(epoch) + '.cptk')


def testGAN():
    ## TODO
    pass


def visualize():
    ## TODO
    pass


def saveModel():
    ## TODO
    pass


if __name__ == '__main__':
    trainGAN()
src/3dgan.py
import os
import numpy as np
import tensorflow as tf
import dataIO as d
from tqdm import *

'''
Global Parameters
'''
n_epochs = 10
batch_size = 64
g_lr = 0.0025
d_lr = 0.00001
beta = 0.5
alpha_d = 0.0015
alpha_g = 0.000025
d_thresh = 0.8
z_size = 100
obj = 'chair'

train_sample_directory = './train_sample/'
model_directory = './models/'
is_local = True

# Parameter dictionaries shared by generator()/discriminator();
# populated once by initialiseWeights()/initialiseBiases().
weights, biases = {}, {}


def generator(z, batch_size=batch_size, phase_train=True, reuse=False):
    """Map a latent batch z (batch, z_size) to a (batch, 32, 32, 32, 1) voxel
    grid in [0, 1] via a linear projection and three transposed 3-D convs."""
    strides = [1, 2, 2, 2, 1]

    # Project z to a 4x4x4x512 feature volume.
    g_1 = tf.add(tf.matmul(z, weights['wg1']), biases['bg1'])
    g_1 = tf.reshape(g_1, [-1, 4, 4, 4, 512])
    g_1 = tf.contrib.layers.batch_norm(g_1, is_training=phase_train)

    g_2 = tf.nn.conv3d_transpose(g_1, weights['wg2'], output_shape=[batch_size, 8, 8, 8, 256], strides=strides, padding="SAME")
    g_2 = tf.nn.bias_add(g_2, biases['bg2'])
    g_2 = tf.contrib.layers.batch_norm(g_2, is_training=phase_train)
    g_2 = tf.nn.relu(g_2)

    g_3 = tf.nn.conv3d_transpose(g_2, weights['wg3'], output_shape=[batch_size, 16, 16, 16, 128], strides=strides, padding="SAME")
    g_3 = tf.nn.bias_add(g_3, biases['bg3'])
    g_3 = tf.contrib.layers.batch_norm(g_3, is_training=phase_train)
    g_3 = tf.nn.relu(g_3)

    # Final layer: sigmoid occupancy probabilities, no batch norm.
    g_4 = tf.nn.conv3d_transpose(g_3, weights['wg4'], output_shape=[batch_size, 32, 32, 32, 1], strides=strides, padding="SAME")
    g_4 = tf.nn.bias_add(g_4, biases['bg4'])
    g_4 = tf.nn.sigmoid(g_4)

    return g_4


def discriminator(inputs, phase_train=True, reuse=False):
    """Score a (batch, 32, 32, 32, 1) voxel grid; returns unbounded logits
    of shape (batch, 1) (no sigmoid applied here)."""
    strides = [1, 2, 2, 2, 1]

    d_1 = tf.nn.conv3d(inputs, weights['wd1'], strides=strides, padding="SAME")
    d_1 = tf.nn.bias_add(d_1, biases['bd1'])
    d_1 = tf.contrib.layers.batch_norm(d_1, is_training=phase_train)
    d_1 = tf.nn.relu(d_1)

    d_2 = tf.nn.conv3d(d_1, weights['wd2'], strides=strides, padding="SAME")
    d_2 = tf.nn.bias_add(d_2, biases['bd2'])
    d_2 = tf.contrib.layers.batch_norm(d_2, is_training=phase_train)
    d_2 = tf.nn.relu(d_2)

    d_3 = tf.nn.conv3d(d_2, weights['wd3'], strides=strides, padding="SAME")
    d_3 = tf.nn.bias_add(d_3, biases['bd3'])
    d_3 = tf.contrib.layers.batch_norm(d_3, is_training=phase_train)
    d_3 = tf.nn.relu(d_3)

    d_4 = tf.nn.conv3d(d_3, weights['wd4'], strides=strides, padding="SAME")
    d_4 = tf.nn.bias_add(d_4, biases['bd4'])
    d_4 = tf.contrib.layers.batch_norm(d_4, is_training=phase_train)
    d_4 = tf.nn.relu(d_4)

    # Flatten and project to a single score per sample.
    shape = d_4.get_shape().as_list()
    dim = np.prod(shape[1:])
    d_5 = tf.reshape(d_4, shape=[-1, dim])
    d_5 = tf.add(tf.matmul(d_5, weights['wd5']), biases['bd5'])

    return d_5


def initialiseWeights():
    """Create all generator/discriminator weight variables (Xavier init)
    in the module-level `weights` dict and return it."""
    global weights
    xavier_init = tf.contrib.layers.xavier_initializer()

    # filter for deconv3d: A 5-D Tensor with the same type as value and
    # shape [depth, height, width, output_channels, in_channels]
    weights['wg1'] = tf.get_variable("wg1", shape=[z_size, 4*4*4*512], initializer=xavier_init)
    weights['wg2'] = tf.get_variable("wg2", shape=[4, 4, 4, 256, 512], initializer=xavier_init)
    weights['wg3'] = tf.get_variable("wg3", shape=[4, 4, 4, 128, 256], initializer=xavier_init)
    weights['wg4'] = tf.get_variable("wg4", shape=[4, 4, 4, 1, 128], initializer=xavier_init)

    weights['wd1'] = tf.get_variable("wd1", shape=[4, 4, 4, 1, 32], initializer=xavier_init)
    weights['wd2'] = tf.get_variable("wd2", shape=[4, 4, 4, 32, 64], initializer=xavier_init)
    weights['wd3'] = tf.get_variable("wd3", shape=[4, 4, 4, 64, 128], initializer=xavier_init)
    weights['wd4'] = tf.get_variable("wd4", shape=[2, 2, 2, 128, 256], initializer=xavier_init)
    weights['wd5'] = tf.get_variable("wd5", shape=[2*2*2*256, 1], initializer=xavier_init)

    return weights


def initialiseBiases():
    """Create all generator/discriminator bias variables (zero init)
    in the module-level `biases` dict and return it."""
    global biases
    zero_init = tf.zeros_initializer()

    biases['bg1'] = tf.get_variable("bg1", shape=[4*4*4*512], initializer=zero_init)
    biases['bg2'] = tf.get_variable("bg2", shape=[256], initializer=zero_init)
    biases['bg3'] = tf.get_variable("bg3", shape=[128], initializer=zero_init)
    biases['bg4'] = tf.get_variable("bg4", shape=[1], initializer=zero_init)

    biases['bd1'] = tf.get_variable("bd1", shape=[32], initializer=zero_init)
    biases['bd2'] = tf.get_variable("bd2", shape=[64], initializer=zero_init)
    biases['bd3'] = tf.get_variable("bd3", shape=[128], initializer=zero_init)
    biases['bd4'] = tf.get_variable("bd4", shape=[256], initializer=zero_init)
    biases['bd5'] = tf.get_variable("bd5", shape=[1], initializer=zero_init)

    return biases


def trainGAN():
    """Build the GAN graph and run the alternating D/G training loop,
    periodically dumping generated samples and model checkpoints."""
    weights, biases = initialiseWeights(), initialiseBiases()

    z_vector = tf.placeholder(shape=[batch_size, z_size], dtype=tf.float32)
    x_vector = tf.placeholder(shape=[batch_size, 32, 32, 32, 1], dtype=tf.float32)

    net_g_train = generator(z_vector, phase_train=True, reuse=False)

    # Clamp D outputs away from {0, 1} so tf.log never sees 0.
    d_output_x = discriminator(x_vector, phase_train=True, reuse=False)
    d_output_x = tf.maximum(tf.minimum(d_output_x, 0.99), 0.01)
    summary_d_x_hist = tf.summary.histogram("d_prob_x", d_output_x)

    d_output_z = discriminator(net_g_train, phase_train=True, reuse=True)
    d_output_z = tf.maximum(tf.minimum(d_output_z, 0.99), 0.01)
    summary_d_z_hist = tf.summary.histogram("d_prob_z", d_output_z)

    # Standard (non-saturating for D, saturating for G) GAN losses.
    d_loss = -tf.reduce_mean(tf.log(d_output_x) + tf.log(1 - d_output_z))
    summary_d_loss = tf.summary.scalar("d_loss", d_loss)
    g_loss = -tf.reduce_mean(tf.log(d_output_z))
    summary_g_loss = tf.summary.scalar("g_loss", g_loss)

    net_g_test = generator(z_vector, phase_train=True, reuse=True)

    # NOTE(review): selecting trainable variables by position is fragile —
    # it silently breaks if layer construction order changes. Kept as-is.
    para_g = list(np.array(tf.trainable_variables())[[0, 1, 4, 5, 8, 9, 12, 13]])
    para_d = list(np.array(tf.trainable_variables())[[14, 15, 16, 17, 20, 21, 24, 25]])  # ,28,29]]

    # only update the weights for the discriminator network
    optimizer_op_d = tf.train.AdamOptimizer(learning_rate=alpha_d, beta1=beta).minimize(d_loss, var_list=para_d)
    # only update the weights for the generator network
    optimizer_op_g = tf.train.AdamOptimizer(learning_rate=alpha_g, beta1=beta).minimize(g_loss, var_list=para_g)

    saver = tf.train.Saver(max_to_keep=50)

    # Fix: build the merged-summary op ONCE, outside the training loop.
    # The original rebuilt it every epoch, growing the graph each iteration.
    d_summary_merge = tf.summary.merge([summary_d_loss, summary_d_x_hist, summary_d_z_hist])

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # Fixed latent batch so sample dumps are comparable across epochs.
        z_sample = np.random.normal(0, 0.33, size=[batch_size, z_size]).astype(np.float32)
        volumes = d.getAll(obj=obj, train=True, is_local=is_local)
        # np.float is a removed alias for float; float32 matches the placeholder.
        volumes = volumes[..., np.newaxis].astype(np.float32)

        for epoch in tqdm(range(n_epochs)):
            idx = np.random.randint(len(volumes), size=batch_size)
            x = volumes[idx]
            z = np.random.normal(0, 0.33, size=[batch_size, z_size]).astype(np.float32)

            summary_d, discriminator_loss = sess.run([d_summary_merge, d_loss], feed_dict={z_vector: z, x_vector: x})
            summary_g, generator_loss = sess.run([summary_g_loss, g_loss], feed_dict={z_vector: z})

            # Skip updating whichever network is already winning
            # (loss below -ln(0.01) * 0.1 ~= 0.46).
            if discriminator_loss <= 4.6 * 0.1:
                sess.run([optimizer_op_g], feed_dict={z_vector: z})
            elif generator_loss <= 4.6 * 0.1:
                sess.run([optimizer_op_d], feed_dict={z_vector: z, x_vector: x})
            else:
                sess.run([optimizer_op_d], feed_dict={z_vector: z, x_vector: x})
                sess.run([optimizer_op_g], feed_dict={z_vector: z})

            print("epoch: {0}, d_loss: {1}, g_loss: {2}".format(epoch, discriminator_loss, generator_loss))

            # output generated chairs and checkpoint the model
            if epoch % 500 == 10:
                g_chairs = sess.run(net_g_test, feed_dict={z_vector: z_sample})
                if not os.path.exists(train_sample_directory):
                    os.makedirs(train_sample_directory)
                g_chairs.dump(train_sample_directory + '/' + str(epoch))
                if not os.path.exists(model_directory):
                    os.makedirs(model_directory)
                saver.save(sess, save_path=model_directory + '/' + str(epoch) + '.cptk')


def testGAN():
    ## TODO
    pass


def visualize():
    ## TODO
    pass


def saveModel():
    ## TODO
    pass


if __name__ == '__main__':
    trainGAN()
0.685002
0.361052
import os
import sys
import tvm
import argparse
import numpy as np
from tvm import te
from tvm import topi
from tvm import auto_scheduler

parser = argparse.ArgumentParser(description='tvm op test!')
parser.add_argument("--op", type=str, default="normalization",
                    help="[reduce, element_wise, normalization]")
parser.add_argument("--axis", type=int, default=0, help="reduce axis!")
pargs = parser.parse_args()


@auto_scheduler.register_workload
def tvm_batch_normalization(n, c, h, w):
    """NCHW batch-norm workload: normalize over (n, h, w) per channel and
    produce updated running mean/variance with momentum 0.9."""
    A = te.placeholder((n, c, h, w), name='A')
    # NCHW -> NHWC -> (n*h*w, c) so the reduction is over axis 0.
    B = topi.transpose(A, (0, 2, 3, 1))
    C = topi.reshape(B, [-1, c])
    D = topi.sum(C, axis=0, keepdims=True)
    E = D / (n * h * w)                      # per-channel mean
    F = topi.subtract(C, E)
    G = topi.multiply(F, F)
    H = topi.sum(G, axis=0, keepdims=True)
    I = H / (n * h * w)                      # per-channel variance
    J = topi.sqrt(I)
    K = topi.divide(F, J)                    # normalized values
    L = topi.reshape(K, (n, h, w, c))
    M = topi.transpose(L, (0, 3, 1, 2))      # back to NCHW
    mean = te.placeholder((1, c), name='mean')
    var = te.placeholder((1, c), name='var')
    m = mean * 0.9 + E * 0.1                 # running-mean update
    v = var * 0.9 + J * 0.1                  # running "var" update (uses stddev J)
    return [A, mean, var, M, m, v]


@auto_scheduler.register_workload
def tvm_normalization(c, h, w, axis):
    """Standardize a (c, h, w) tensor along `axis` (subtract mean, divide
    by stddev)."""
    A = te.placeholder((c, h, w), name='A')
    B = topi.sum(A, axis=axis, keepdims=True)
    # NOTE(review): the divisor is always c, even when axis is 1 or 2;
    # tvm_reduce below divides by the reduced dimension instead. Possibly
    # intentional for benchmarking — confirm before changing.
    C = B / c
    D = topi.subtract(A, C)
    E = topi.multiply(D, D)
    F = topi.sum(E, axis=axis, keepdims=True)
    G = F / c
    H = topi.sqrt(G)
    I = topi.divide(D, H)
    return [A, I]


@auto_scheduler.register_workload
def tvm_reduce(c, h, w, axis):
    """Mean-reduce a (c, h, w) tensor along `axis` (keepdims)."""
    A = te.placeholder((c, h, w), name='A')
    B = topi.sum(A, axis=axis, keepdims=True)
    # Divide by the length of the reduced dimension.
    if axis == 0:
        C = B / c
    elif axis == 1:
        C = B / h
    else:
        C = B / w
    return [A, C]


@auto_scheduler.register_workload
def tvm_fuse_elementwise(c1, h1, w1, c2, h2, w2, c3, h3, w3, c4, h4, w4):
    """Fused broadcasting element-wise chain: ((A + B) - C) * D."""
    A = te.placeholder((c1, h1, w1), name='A')
    B = te.placeholder((c2, h2, w2), name='B')
    C = te.placeholder((c3, h3, w3), name='C')
    D = te.placeholder((c4, h4, w4), name='D')
    E = topi.add(A, B)
    F = topi.subtract(E, C)
    G = topi.multiply(F, D)
    return [A, B, C, D, G]


target = tvm.target.Target("cuda")


def batch_normalization():
    """Build the auto-scheduler search task for the batch-norm workload."""
    n, c, h, w = 128, 256, 32, 32
    task = auto_scheduler.SearchTask(func=tvm_batch_normalization, args=(n, c, h, w), target=target)
    return task


def normalization(axis):
    """Build the normalization search task; `axis` must be 0, 1 or 2."""
    if axis > 2:
        sys.exit("axis should be less than 3")
    c, h, w = 128, 1024, 256
    task = auto_scheduler.SearchTask(func=tvm_normalization, args=(c, h, w, axis), target=target)
    return task


def reduce(axis):
    """Build the reduce search task; `axis` must be 0, 1 or 2."""
    if axis > 2:
        sys.exit("axis should be less than 3")
    c, h, w = 128, 1024, 256
    task = auto_scheduler.SearchTask(func=tvm_reduce, args=(c, h, w, axis), target=target)
    return task


def element_wise():
    """Build the fused element-wise search task (broadcast shapes)."""
    c1, h1, w1 = 128, 512, 1024
    c2, h2, w2 = 1, 512, 1024
    c3, h3, w3 = 128, 1, 1024
    c4, h4, w4 = 128, 512, 1
    task = auto_scheduler.SearchTask(
        func=tvm_fuse_elementwise,
        args=(c1, h1, w1, c2, h2, w2, c3, h3, w3, c4, h4, w4),
        target=target)
    return task


# Select the task from the --op flag.
if pargs.op == "normalization":
    task = normalization(pargs.axis)
elif pargs.op == "reduce":
    task = reduce(pargs.axis)
elif pargs.op == "element_wise":
    task = element_wise()
elif pargs.op == "batch_normalization":
    task = batch_normalization()
else:
    sys.exit("op is unknown!")

print(task.compute_dag)

log_file = "des.json"
measure_ctx = auto_scheduler.LocalRPCMeasureContext(min_repeat_ms=300)
tune_option = auto_scheduler.TuningOptions(
    num_measure_trials=64,  # change this to 1000 to achieve the best performance
    runner=measure_ctx.runner,
    measure_callbacks=[auto_scheduler.RecordToFile(log_file)],
    verbose=2,
)

# Run auto-tuning (search)
task.tune(tune_option)
# Apply the best schedule
sch, args = task.apply_best(log_file)
# Kill the measurement process
del measure_ctx

print("Lowered TIR:")
print(tvm.lower(sch, args, simple_mode=True))

mod = tvm.build(sch, args, target)
# mod.save("normalize.o")
# mod = tvm.load_module("normalize.tvm")


# Check correctness
def run_normalization(mod):
    """Run the tuned normalization kernel 10x on random input."""
    c, h, w = 128, 1024, 256
    A = np.random.uniform(size=(c, h, w)).astype(np.float32)
    O = np.zeros(shape=(c, h, w), dtype=np.float32)
    dev = tvm.gpu()
    tvm_A = tvm.nd.array(A, device=dev)
    tvm_O = tvm.nd.empty(O.shape, device=dev)
    for i in range(10):
        mod(tvm_A, tvm_O)


def run_batch_normalization(mod):
    """Run the tuned batch-norm kernel 10x on random NCHW input."""
    n, c, h, w = 128, 256, 32, 32
    A = np.random.uniform(size=(n, c, h, w)).astype(np.float32)
    mean = np.random.uniform(size=(1, c)).astype(np.float32)
    var = np.random.uniform(size=(1, c)).astype(np.float32)
    O = np.zeros(shape=(n, c, h, w), dtype=np.float32)
    m = np.zeros(shape=(1, c), dtype=np.float32)
    v = np.zeros(shape=(1, c), dtype=np.float32)
    dev = tvm.gpu()
    tvm_A = tvm.nd.array(A, device=dev)
    tvm_mean = tvm.nd.array(mean, device=dev)
    tvm_var = tvm.nd.array(var, device=dev)
    tvm_O = tvm.nd.empty(O.shape, device=dev)
    tvm_m = tvm.nd.empty(m.shape, device=dev)
    tvm_v = tvm.nd.empty(v.shape, device=dev)
    for i in range(10):
        mod(tvm_A, tvm_mean, tvm_var, tvm_O, tvm_m, tvm_v)


def run_element_wise(mod):
    """Run the tuned fused element-wise kernel 10x on random inputs."""
    c1, h1, w1 = 128, 512, 1024
    c2, h2, w2 = 1, 512, 1024
    c3, h3, w3 = 128, 1, 1024
    c4, h4, w4 = 128, 512, 1
    A = np.random.uniform(size=(c1, h1, w1)).astype(np.float32)
    B = np.random.uniform(size=(c2, h2, w2)).astype(np.float32)
    C = np.random.uniform(size=(c3, h3, w3)).astype(np.float32)
    D = np.random.uniform(size=(c4, h4, w4)).astype(np.float32)
    O = np.zeros(shape=(c1, h1, w1), dtype=np.float32)
    dev = tvm.gpu()
    tvm_A = tvm.nd.array(A, device=dev)
    tvm_B = tvm.nd.array(B, device=dev)
    tvm_C = tvm.nd.array(C, device=dev)
    tvm_D = tvm.nd.array(D, device=dev)
    tvm_O = tvm.nd.empty(O.shape, device=dev)
    for i in range(10):
        mod(tvm_A, tvm_B, tvm_C, tvm_D, tvm_O)


def run_reduce(mod, axis):
    """Run the tuned reduce kernel 10x; output shape depends on `axis`."""
    c, h, w = 128, 1024, 256
    A = np.random.uniform(size=(c, h, w)).astype(np.float32)
    if axis == 0:
        O = np.zeros(shape=(1, h, w), dtype=np.float32)
    elif axis == 1:
        O = np.zeros(shape=(c, 1, w), dtype=np.float32)
    elif axis == 2:
        O = np.zeros(shape=(c, h, 1), dtype=np.float32)
    dev = tvm.gpu()
    tvm_A = tvm.nd.array(A, device=dev)
    tvm_O = tvm.nd.empty(O.shape, device=dev)
    for i in range(10):
        mod(tvm_A, tvm_O)


# Execute the kernel matching the selected op.
if pargs.op == "batch_normalization":
    run_batch_normalization(mod)
elif pargs.op == "normalization":
    run_normalization(mod)
elif pargs.op == "reduce":
    run_reduce(mod, pargs.axis)
elif pargs.op == "element_wise":
    run_element_wise(mod)

# Dump the generated CUDA source of the tuned kernel.
for sub_mod in mod.imported_modules:
    print(sub_mod.get_source("cu"))
op_benchmark/tvm_op_autosheduler.py
import os
import sys
import tvm
import argparse
import numpy as np
from tvm import te
from tvm import topi
from tvm import auto_scheduler

parser = argparse.ArgumentParser(description='tvm op test!')
parser.add_argument("--op", type=str, default="normalization",
                    help="[reduce, element_wise, normalization]")
parser.add_argument("--axis", type=int, default=0, help="reduce axis!")
pargs = parser.parse_args()


@auto_scheduler.register_workload
def tvm_batch_normalization(n, c, h, w):
    """NCHW batch-norm workload: normalize over (n, h, w) per channel and
    produce updated running mean/variance with momentum 0.9."""
    A = te.placeholder((n, c, h, w), name='A')
    # NCHW -> NHWC -> (n*h*w, c) so the reduction is over axis 0.
    B = topi.transpose(A, (0, 2, 3, 1))
    C = topi.reshape(B, [-1, c])
    D = topi.sum(C, axis=0, keepdims=True)
    E = D / (n * h * w)                      # per-channel mean
    F = topi.subtract(C, E)
    G = topi.multiply(F, F)
    H = topi.sum(G, axis=0, keepdims=True)
    I = H / (n * h * w)                      # per-channel variance
    J = topi.sqrt(I)
    K = topi.divide(F, J)                    # normalized values
    L = topi.reshape(K, (n, h, w, c))
    M = topi.transpose(L, (0, 3, 1, 2))      # back to NCHW
    mean = te.placeholder((1, c), name='mean')
    var = te.placeholder((1, c), name='var')
    m = mean * 0.9 + E * 0.1                 # running-mean update
    v = var * 0.9 + J * 0.1                  # running "var" update (uses stddev J)
    return [A, mean, var, M, m, v]


@auto_scheduler.register_workload
def tvm_normalization(c, h, w, axis):
    """Standardize a (c, h, w) tensor along `axis` (subtract mean, divide
    by stddev)."""
    A = te.placeholder((c, h, w), name='A')
    B = topi.sum(A, axis=axis, keepdims=True)
    # NOTE(review): the divisor is always c, even when axis is 1 or 2;
    # tvm_reduce below divides by the reduced dimension instead. Possibly
    # intentional for benchmarking — confirm before changing.
    C = B / c
    D = topi.subtract(A, C)
    E = topi.multiply(D, D)
    F = topi.sum(E, axis=axis, keepdims=True)
    G = F / c
    H = topi.sqrt(G)
    I = topi.divide(D, H)
    return [A, I]


@auto_scheduler.register_workload
def tvm_reduce(c, h, w, axis):
    """Mean-reduce a (c, h, w) tensor along `axis` (keepdims)."""
    A = te.placeholder((c, h, w), name='A')
    B = topi.sum(A, axis=axis, keepdims=True)
    # Divide by the length of the reduced dimension.
    if axis == 0:
        C = B / c
    elif axis == 1:
        C = B / h
    else:
        C = B / w
    return [A, C]


@auto_scheduler.register_workload
def tvm_fuse_elementwise(c1, h1, w1, c2, h2, w2, c3, h3, w3, c4, h4, w4):
    """Fused broadcasting element-wise chain: ((A + B) - C) * D."""
    A = te.placeholder((c1, h1, w1), name='A')
    B = te.placeholder((c2, h2, w2), name='B')
    C = te.placeholder((c3, h3, w3), name='C')
    D = te.placeholder((c4, h4, w4), name='D')
    E = topi.add(A, B)
    F = topi.subtract(E, C)
    G = topi.multiply(F, D)
    return [A, B, C, D, G]


target = tvm.target.Target("cuda")


def batch_normalization():
    """Build the auto-scheduler search task for the batch-norm workload."""
    n, c, h, w = 128, 256, 32, 32
    task = auto_scheduler.SearchTask(func=tvm_batch_normalization, args=(n, c, h, w), target=target)
    return task


def normalization(axis):
    """Build the normalization search task; `axis` must be 0, 1 or 2."""
    if axis > 2:
        sys.exit("axis should be less than 3")
    c, h, w = 128, 1024, 256
    task = auto_scheduler.SearchTask(func=tvm_normalization, args=(c, h, w, axis), target=target)
    return task


def reduce(axis):
    """Build the reduce search task; `axis` must be 0, 1 or 2."""
    if axis > 2:
        sys.exit("axis should be less than 3")
    c, h, w = 128, 1024, 256
    task = auto_scheduler.SearchTask(func=tvm_reduce, args=(c, h, w, axis), target=target)
    return task


def element_wise():
    """Build the fused element-wise search task (broadcast shapes)."""
    c1, h1, w1 = 128, 512, 1024
    c2, h2, w2 = 1, 512, 1024
    c3, h3, w3 = 128, 1, 1024
    c4, h4, w4 = 128, 512, 1
    task = auto_scheduler.SearchTask(
        func=tvm_fuse_elementwise,
        args=(c1, h1, w1, c2, h2, w2, c3, h3, w3, c4, h4, w4),
        target=target)
    return task


# Select the task from the --op flag.
if pargs.op == "normalization":
    task = normalization(pargs.axis)
elif pargs.op == "reduce":
    task = reduce(pargs.axis)
elif pargs.op == "element_wise":
    task = element_wise()
elif pargs.op == "batch_normalization":
    task = batch_normalization()
else:
    sys.exit("op is unknown!")

print(task.compute_dag)

log_file = "des.json"
measure_ctx = auto_scheduler.LocalRPCMeasureContext(min_repeat_ms=300)
tune_option = auto_scheduler.TuningOptions(
    num_measure_trials=64,  # change this to 1000 to achieve the best performance
    runner=measure_ctx.runner,
    measure_callbacks=[auto_scheduler.RecordToFile(log_file)],
    verbose=2,
)

# Run auto-tuning (search)
task.tune(tune_option)
# Apply the best schedule
sch, args = task.apply_best(log_file)
# Kill the measurement process
del measure_ctx

print("Lowered TIR:")
print(tvm.lower(sch, args, simple_mode=True))

mod = tvm.build(sch, args, target)
# mod.save("normalize.o")
# mod = tvm.load_module("normalize.tvm")


# Check correctness
def run_normalization(mod):
    """Run the tuned normalization kernel 10x on random input."""
    c, h, w = 128, 1024, 256
    A = np.random.uniform(size=(c, h, w)).astype(np.float32)
    O = np.zeros(shape=(c, h, w), dtype=np.float32)
    dev = tvm.gpu()
    tvm_A = tvm.nd.array(A, device=dev)
    tvm_O = tvm.nd.empty(O.shape, device=dev)
    for i in range(10):
        mod(tvm_A, tvm_O)


def run_batch_normalization(mod):
    """Run the tuned batch-norm kernel 10x on random NCHW input."""
    n, c, h, w = 128, 256, 32, 32
    A = np.random.uniform(size=(n, c, h, w)).astype(np.float32)
    mean = np.random.uniform(size=(1, c)).astype(np.float32)
    var = np.random.uniform(size=(1, c)).astype(np.float32)
    O = np.zeros(shape=(n, c, h, w), dtype=np.float32)
    m = np.zeros(shape=(1, c), dtype=np.float32)
    v = np.zeros(shape=(1, c), dtype=np.float32)
    dev = tvm.gpu()
    tvm_A = tvm.nd.array(A, device=dev)
    tvm_mean = tvm.nd.array(mean, device=dev)
    tvm_var = tvm.nd.array(var, device=dev)
    tvm_O = tvm.nd.empty(O.shape, device=dev)
    tvm_m = tvm.nd.empty(m.shape, device=dev)
    tvm_v = tvm.nd.empty(v.shape, device=dev)
    for i in range(10):
        mod(tvm_A, tvm_mean, tvm_var, tvm_O, tvm_m, tvm_v)


def run_element_wise(mod):
    """Run the tuned fused element-wise kernel 10x on random inputs."""
    c1, h1, w1 = 128, 512, 1024
    c2, h2, w2 = 1, 512, 1024
    c3, h3, w3 = 128, 1, 1024
    c4, h4, w4 = 128, 512, 1
    A = np.random.uniform(size=(c1, h1, w1)).astype(np.float32)
    B = np.random.uniform(size=(c2, h2, w2)).astype(np.float32)
    C = np.random.uniform(size=(c3, h3, w3)).astype(np.float32)
    D = np.random.uniform(size=(c4, h4, w4)).astype(np.float32)
    O = np.zeros(shape=(c1, h1, w1), dtype=np.float32)
    dev = tvm.gpu()
    tvm_A = tvm.nd.array(A, device=dev)
    tvm_B = tvm.nd.array(B, device=dev)
    tvm_C = tvm.nd.array(C, device=dev)
    tvm_D = tvm.nd.array(D, device=dev)
    tvm_O = tvm.nd.empty(O.shape, device=dev)
    for i in range(10):
        mod(tvm_A, tvm_B, tvm_C, tvm_D, tvm_O)


def run_reduce(mod, axis):
    """Run the tuned reduce kernel 10x; output shape depends on `axis`."""
    c, h, w = 128, 1024, 256
    A = np.random.uniform(size=(c, h, w)).astype(np.float32)
    if axis == 0:
        O = np.zeros(shape=(1, h, w), dtype=np.float32)
    elif axis == 1:
        O = np.zeros(shape=(c, 1, w), dtype=np.float32)
    elif axis == 2:
        O = np.zeros(shape=(c, h, 1), dtype=np.float32)
    dev = tvm.gpu()
    tvm_A = tvm.nd.array(A, device=dev)
    tvm_O = tvm.nd.empty(O.shape, device=dev)
    for i in range(10):
        mod(tvm_A, tvm_O)


# Execute the kernel matching the selected op.
if pargs.op == "batch_normalization":
    run_batch_normalization(mod)
elif pargs.op == "normalization":
    run_normalization(mod)
elif pargs.op == "reduce":
    run_reduce(mod, pargs.axis)
elif pargs.op == "element_wise":
    run_element_wise(mod)

# Dump the generated CUDA source of the tuned kernel.
for sub_mod in mod.imported_modules:
    print(sub_mod.get_source("cu"))
0.34798
0.215
from .algo import Algo
from .algo_code import AlgoCode
from .entity_slot import Slot
from .entity_space import Space
from . import log, show_adding_box_log, find_smallest
from .exception import DistributionException
import time


class AlgoSmallest(Algo):
    """
    find the smallest bin for a given amount of bins.
    Output SHOULD try to have all the items. I.e
    A bin smaller bin with less items is worse than
    A bigger bin with more items
    @param item_collection: set of items
    @returns a Bin object whose size is smallest with the most amount of items
    """

    def run(self):
        log.debug("Entering Algorithm SMALLEST")

        bin_collection = self.bins
        item_collection = self.items

        if not (bin_collection.size() > 0 and item_collection.size() > 0):
            raise DistributionException("Please provide atleast one bin and item")

        def continue_fn(current_bin, space, item):
            # Item collides with something already placed at this position.
            if current_bin.occupied_space(space, item):
                return AlgoCode.NO_SPACE

            m_y = current_bin.get_min_y_pos(space.y)

            if (space.x + item.w) > current_bin.w:
                # try z now
                space.z += item.d
                space.x = 0
            else:
                space.x += 1

            # if space.z fails and so does space.x, go up in height;
            # make sure y is at the proper juxtaposition
            if space.z + item.d > current_bin.d:
                space.y += m_y.max_y
                space.x = m_y.min_x
                space.z = m_y.min_z

            # if we're at the top of the box we cannot allocate
            # any more space, so we can move on
            if int(space.y + item.h) > current_bin.h:
                return AlgoCode.LAST_ITEM

            return AlgoCode.FOUND_SPACE

        current_bin = bin_collection.next()
        while current_bin:
            log.info("Trying to allocate items for bin: {0}".format(current_bin.id))
            item_collection.reset()
            current_bin.start_time = time.time()

            item = item_collection.next()
            while item:
                item = item_collection.current()

                if not current_bin.can_fit(item):
                    item_collection.next()
                    continue

                space = Space(x=0, y=0, z=0)
                can_continue = continue_fn(current_bin, space, item)
                # if item.w > bin.w:
                #     self.binner.add_lost(item)

                # Walk candidate positions until the item fits or the bin tops out.
                while can_continue == AlgoCode.NO_SPACE:
                    space.compute_next_sequence()
                    can_continue = continue_fn(current_bin, space, item)

                if can_continue == AlgoCode.LAST_ITEM:
                    continue

                show_adding_box_log(space, item)
                slot = Slot.from_space_and_item(space, item)
                current_bin.append(slot)
                item_collection.next()

            current_bin.end_time = time.time()
            current_bin = bin_collection.next()

        # to be the smallest bin we must allocate all space of the bin
        # and be the smallest in size
        smallest = find_smallest(item_collection, self.binner.packed_bins)
        self.binner.set_smallest(smallest)
        return self.binner
binner/algo_smallest.py
from .algo import Algo
from .algo_code import AlgoCode
from .entity_slot import Slot
from .entity_space import Space
from . import log, show_adding_box_log, find_smallest
from .exception import DistributionException
import time


class AlgoSmallest(Algo):
    """
    find the smallest bin for a given amount of bins.
    Output SHOULD try to have all the items. I.e
    A bin smaller bin with less items is worse than
    A bigger bin with more items

    @param item_collection: set of items
    @returns a Bin object whose size is smallest with the most amount of items
    """

    def run(self):
        # Strategy: try to pack every item into each candidate bin in turn,
        # then pick the smallest fully packed bin via find_smallest().
        log.debug("Entering Algorithm SMALLEST")
        bin_collection = self.bins
        item_collection = self.items

        if not (bin_collection.size() > 0 and item_collection.size() > 0):
            raise DistributionException("Please provide atleast one bin and item")

        def continue_fn(bin, space, item):
            # Advance `space` (mutated in place) to the next candidate
            # position for `item` inside `bin`. Returns one of:
            #   AlgoCode.NO_SPACE    - position occupied; caller should retry
            #   AlgoCode.LAST_ITEM   - bin height exhausted; item unplaceable
            #   AlgoCode.FOUND_SPACE - `space` now holds a usable position
            if bin.occupied_space(space, item):
                return AlgoCode.NO_SPACE

            m_y = bin.get_min_y_pos(space.y)

            # Scan along x first; when the row is exhausted, step in depth (z).
            if (space.x + item.w) > bin.w:
                """ try z now """
                space.z += item.d
                space.x = 0
            else:
                space.x += 1

            """ if space.z fails and so does space.x """
            """ go up in height make sure y """
            """ is at the proper juxtaposition """
            if space.z + item.d > bin.d:
                space.y += m_y.max_y
                space.x = m_y.min_x
                space.z = m_y.min_z

            """ if were at the top of the box """
            """ we cannot allocate any more space so we can move on """
            if int(space.y + item.h) > bin.h:
                return AlgoCode.LAST_ITEM

            return AlgoCode.FOUND_SPACE

        bin = bin_collection.next()

        while bin:
            log.info("Trying to allocate items for bin: {0}".format(bin.id))
            # Restart the item cursor for every bin; each bin is timed.
            item_collection.reset()
            bin.start_time = time.time()
            item = item_collection.next()

            while item:
                item = item_collection.current()

                # Items whose dimensions exceed the bin are skipped outright.
                if not bin.can_fit(item):
                    item_collection.next()
                    continue

                space = Space(x=0, y=0, z=0)
                can_continue = continue_fn(bin, space, item)
                """ if item.w > bin.w: """
                """ self.binner.add_lost(item) """

                # Walk candidate positions until one is free or the bin tops out.
                while can_continue == AlgoCode.NO_SPACE:
                    space.compute_next_sequence()
                    can_continue = continue_fn(bin, space, item)

                # NOTE(review): on LAST_ITEM this `continue` does not advance
                # item_collection, so the same item is re-tried from a fresh
                # Space(0, 0, 0) on the next pass — confirm upstream that this
                # cannot loop indefinitely for an item that never fits.
                if can_continue == AlgoCode.LAST_ITEM:
                    continue

                show_adding_box_log(space, item)
                slot = Slot.from_space_and_item(space, item)
                bin.append(slot)
                item_collection.next()

            bin.end_time = time.time()
            bin = bin_collection.next()

        """ to be the smallest bin we must allocate
            all space of the bin and be the smallest in size """
        smallest = find_smallest(item_collection, self.binner.packed_bins)
        self.binner.set_smallest(smallest)
        return self.binner
0.53777
0.282209
import numpy as np
import pandas as pd
import pytest
from typing import Type

from sklearn.datasets import load_wine, load_breast_cancer
from sklearn.ensemble import VotingClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, log_loss
from sklearn.pipeline import Pipeline

from gama.postprocessing import EnsemblePostProcessing
from gama.search_methods import AsynchronousSuccessiveHalving, AsyncEA, RandomSearch
from gama.search_methods.base_search import BaseSearch
from gama.utilities.generic.stopwatch import Stopwatch
from gama import GamaClassifier

import warnings

# Escalate warnings to errors so regressions that start warning fail the suite.
warnings.filterwarnings("error")

# fit() may overrun its time budget by at most 10%.
FIT_TIME_MARGIN = 1.1

# While we could derive statistics dynamically,
# we want to know if any changes ever happen, so we save them statically.
breast_cancer = dict(
    name="breast_cancer",
    load=load_breast_cancer,
    test_size=143,
    n_classes=2,
    base_accuracy=0.62937,
    base_log_loss=12.80138,
)

breast_cancer_missing = dict(
    name="breast_cancer_missing",
    load=load_breast_cancer,
    target="status",
    test_size=143,
    n_classes=2,
    base_accuracy=0.62937,
    base_log_loss=12.80138,
)

wine = dict(
    name="wine",
    load=load_wine,
    test_size=45,
    n_classes=3,
    base_accuracy=0.4,
    base_log_loss=20.72326,
)

iris_arff = dict(
    name="iris",
    train="tests/data/iris_train.arff",
    test="tests/data/iris_test.arff",
    test_size=50,
    n_classes=3,
    base_accuracy=0.3333,
    base_log_loss=1.09861,
)

diabetes_arff = dict(
    name="diabetes",
    train="tests/data/diabetes_train.arff",
    test="tests/data/diabetes_test.arff",
    test_size=150,
    n_classes=2,
    base_accuracy=0.65104,
    base_log_loss=0.63705,
)


def _test_dataset_problem(
    data,
    metric: str,
    arff: bool = False,
    y_type: Type = pd.DataFrame,
    search: BaseSearch = AsyncEA(),
    missing_values: bool = False,
    max_time: int = 60,
):
    """Fit GAMA on one dataset and assert baseline quality and timing.

    :param data: dataset descriptor dict (see module-level definitions).
    :param metric: scoring metric name passed to GamaClassifier.
    :param arff: if True, read train/test splits from arff files instead.
    :param y_type: pd.DataFrame, pd.Series, np.ndarray or str
    :param search: search method instance to use.
        NOTE(review): the default instance is shared across calls — confirm
        AsyncEA holds no per-run state.
    :param missing_values: if True, inject NaNs into train and test data.
    :param max_time: time budget in seconds for the fit.
    :return: the fitted GamaClassifier.
    """
    gama = GamaClassifier(
        random_state=0,
        max_total_time=max_time,
        scoring=metric,
        search_method=search,
        n_jobs=1,
        post_processing_method=EnsemblePostProcessing(ensemble_size=5),
    )
    if arff:
        # Only descriptors that define a "target" key (e.g.
        # breast_cancer_missing) may be used with arff=True.
        train_path = f"tests/data/{data['name']}_train.arff"
        test_path = f"tests/data/{data['name']}_test.arff"

        # Split in memory only to reconstruct y_test for scoring below.
        X, y = data["load"](return_X_y=True)
        X_train, X_test, y_train, y_test = train_test_split(
            X, y, stratify=y, random_state=0
        )
        y_test = [str(val) for val in y_test]

        with Stopwatch() as sw:
            gama.fit_from_file(train_path, target_column=data["target"])
        class_predictions = gama.predict_from_file(
            test_path, target_column=data["target"]
        )
        class_probabilities = gama.predict_proba_from_file(
            test_path, target_column=data["target"]
        )
        gama_score = gama.score_from_file(test_path)
    else:
        X, y = data["load"](return_X_y=True)
        if y_type == str:
            databunch = data["load"]()
            y = np.asarray([databunch.target_names[c_i] for c_i in databunch.target])
        if y_type in [pd.Series, pd.DataFrame]:
            y = y_type(y)

        X_train, X_test, y_train, y_test = train_test_split(
            X, y, stratify=y, random_state=0
        )
        if missing_values:
            X_train[1:300:2, 0] = X_train[2:300:5, 1] = float("NaN")
            X_test[1:100:2, 0] = X_test[2:100:5, 1] = float("NaN")

        with Stopwatch() as sw:
            gama.fit(X_train, y_train)
        class_predictions = gama.predict(X_test)
        class_probabilities = gama.predict_proba(X_test)
        gama_score = gama.score(X_test, y_test)

    # BUG FIX: compare against the actual time budget (max_time) rather than a
    # hard-coded 60 s, so calls with a different budget are checked correctly.
    assert (
        max_time * FIT_TIME_MARGIN > sw.elapsed_time
    ), "fit must stay within 110% of allotted time."

    assert isinstance(
        class_predictions, np.ndarray
    ), "predictions should be numpy arrays."
    assert (
        data["test_size"],
    ) == class_predictions.shape, "predict should return (N,) shaped array."

    accuracy = accuracy_score(y_test, class_predictions)
    # Majority classifier on this split achieves 0.6293706293706294
    print(data["name"], metric, "accuracy:", accuracy)
    assert (
        data["base_accuracy"] <= accuracy
    ), "predictions should be at least as good as majority class."

    assert isinstance(
        class_probabilities, np.ndarray
    ), "probability predictions should be numpy arrays."
    assert (data["test_size"], data["n_classes"]) == class_probabilities.shape, (
        "predict_proba should return" " (N,K) shaped array."
    )

    # Majority classifier on this split achieves 12.80138131184662
    logloss = log_loss(y_test, class_probabilities)
    print(data["name"], metric, "log-loss:", logloss)
    assert (
        data["base_log_loss"] >= logloss
    ), "predictions should be at least as good as majority class."

    score_to_match = logloss if metric == "neg_log_loss" else accuracy
    assert score_to_match == pytest.approx(gama_score)
    return gama


def test_binary_classification_accuracy():
    """ Binary classification, accuracy, numpy data and ensemble code export """
    gama = _test_dataset_problem(breast_cancer, "accuracy")
    x, y = breast_cancer["load"](return_X_y=True)
    code = gama.export_script(file=None)
    local = {}
    exec(code, {}, local)
    pipeline = local["pipeline"]  # should be defined in exported code
    assert isinstance(pipeline, Pipeline)
    assert isinstance(pipeline.steps[-1][-1], VotingClassifier)
    pipeline.fit(x, y)
    assert 0.9 < pipeline.score(x, y)


def test_binary_classification_accuracy_asha():
    """ Binary classification, accuracy, numpy data, ASHA search. """
    _test_dataset_problem(
        breast_cancer, "accuracy", search=AsynchronousSuccessiveHalving(), max_time=60
    )


def test_binary_classification_accuracy_random_search():
    """ Binary classification, accuracy, numpy data, random search. """
    _test_dataset_problem(breast_cancer, "accuracy", search=RandomSearch())


def test_binary_classification_logloss():
    """ Binary classification, log loss (probabilities), numpy data, ASHA search. """
    _test_dataset_problem(breast_cancer, "neg_log_loss")


def test_multiclass_classification_accuracy():
    """ Multiclass classification, accuracy, numpy data. """
    _test_dataset_problem(wine, "accuracy")


def test_multiclass_classification_logloss():
    """ Multiclass classification, log loss (probabilities), numpy data. """
    _test_dataset_problem(wine, "neg_log_loss")


def test_string_label_classification_accuracy():
    """ Binary classification, accuracy, target is str. """
    _test_dataset_problem(breast_cancer, "accuracy", y_type=str)


def test_string_label_classification_log_loss():
    """ Binary classification, log loss (probabilities), target is str. """
    _test_dataset_problem(breast_cancer, "neg_log_loss", y_type=str)


def test_missing_value_classification_arff():
    """ Binary classification, log loss (probabilities), arff data. """
    _test_dataset_problem(breast_cancer_missing, "neg_log_loss", arff=True)


def test_missing_value_classification():
    """ Binary classification, log loss (probabilities), missing values. """
    _test_dataset_problem(breast_cancer_missing, "neg_log_loss", missing_values=True)
tests/system/test_gamaclassifier.py
import numpy as np
import pandas as pd
import pytest
from typing import Type

from sklearn.datasets import load_wine, load_breast_cancer
from sklearn.ensemble import VotingClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, log_loss
from sklearn.pipeline import Pipeline

from gama.postprocessing import EnsemblePostProcessing
from gama.search_methods import AsynchronousSuccessiveHalving, AsyncEA, RandomSearch
from gama.search_methods.base_search import BaseSearch
from gama.utilities.generic.stopwatch import Stopwatch
from gama import GamaClassifier

import warnings

# Escalate warnings to errors so regressions that start warning fail the suite.
warnings.filterwarnings("error")

# fit() may overrun its time budget by at most 10%.
FIT_TIME_MARGIN = 1.1

# While we could derive statistics dynamically,
# we want to know if any changes ever happen, so we save them statically.
breast_cancer = dict(
    name="breast_cancer",
    load=load_breast_cancer,
    test_size=143,
    n_classes=2,
    base_accuracy=0.62937,
    base_log_loss=12.80138,
)

breast_cancer_missing = dict(
    name="breast_cancer_missing",
    load=load_breast_cancer,
    target="status",
    test_size=143,
    n_classes=2,
    base_accuracy=0.62937,
    base_log_loss=12.80138,
)

wine = dict(
    name="wine",
    load=load_wine,
    test_size=45,
    n_classes=3,
    base_accuracy=0.4,
    base_log_loss=20.72326,
)

iris_arff = dict(
    name="iris",
    train="tests/data/iris_train.arff",
    test="tests/data/iris_test.arff",
    test_size=50,
    n_classes=3,
    base_accuracy=0.3333,
    base_log_loss=1.09861,
)

diabetes_arff = dict(
    name="diabetes",
    train="tests/data/diabetes_train.arff",
    test="tests/data/diabetes_test.arff",
    test_size=150,
    n_classes=2,
    base_accuracy=0.65104,
    base_log_loss=0.63705,
)


def _test_dataset_problem(
    data,
    metric: str,
    arff: bool = False,
    y_type: Type = pd.DataFrame,
    search: BaseSearch = AsyncEA(),
    missing_values: bool = False,
    max_time: int = 60,
):
    """Fit GAMA on one dataset and assert baseline quality and timing.

    :param data: dataset descriptor dict (see module-level definitions).
    :param metric: scoring metric name passed to GamaClassifier.
    :param arff: if True, read train/test splits from arff files instead.
    :param y_type: pd.DataFrame, pd.Series, np.ndarray or str
    :param search: search method instance to use.
        NOTE(review): the default instance is shared across calls — confirm
        AsyncEA holds no per-run state.
    :param missing_values: if True, inject NaNs into train and test data.
    :param max_time: time budget in seconds for the fit.
    :return: the fitted GamaClassifier.
    """
    gama = GamaClassifier(
        random_state=0,
        max_total_time=max_time,
        scoring=metric,
        search_method=search,
        n_jobs=1,
        post_processing_method=EnsemblePostProcessing(ensemble_size=5),
    )
    if arff:
        # Only descriptors that define a "target" key (e.g.
        # breast_cancer_missing) may be used with arff=True.
        train_path = f"tests/data/{data['name']}_train.arff"
        test_path = f"tests/data/{data['name']}_test.arff"

        # Split in memory only to reconstruct y_test for scoring below.
        X, y = data["load"](return_X_y=True)
        X_train, X_test, y_train, y_test = train_test_split(
            X, y, stratify=y, random_state=0
        )
        y_test = [str(val) for val in y_test]

        with Stopwatch() as sw:
            gama.fit_from_file(train_path, target_column=data["target"])
        class_predictions = gama.predict_from_file(
            test_path, target_column=data["target"]
        )
        class_probabilities = gama.predict_proba_from_file(
            test_path, target_column=data["target"]
        )
        gama_score = gama.score_from_file(test_path)
    else:
        X, y = data["load"](return_X_y=True)
        if y_type == str:
            databunch = data["load"]()
            y = np.asarray([databunch.target_names[c_i] for c_i in databunch.target])
        if y_type in [pd.Series, pd.DataFrame]:
            y = y_type(y)

        X_train, X_test, y_train, y_test = train_test_split(
            X, y, stratify=y, random_state=0
        )
        if missing_values:
            X_train[1:300:2, 0] = X_train[2:300:5, 1] = float("NaN")
            X_test[1:100:2, 0] = X_test[2:100:5, 1] = float("NaN")

        with Stopwatch() as sw:
            gama.fit(X_train, y_train)
        class_predictions = gama.predict(X_test)
        class_probabilities = gama.predict_proba(X_test)
        gama_score = gama.score(X_test, y_test)

    # BUG FIX: compare against the actual time budget (max_time) rather than a
    # hard-coded 60 s, so calls with a different budget are checked correctly.
    assert (
        max_time * FIT_TIME_MARGIN > sw.elapsed_time
    ), "fit must stay within 110% of allotted time."

    assert isinstance(
        class_predictions, np.ndarray
    ), "predictions should be numpy arrays."
    assert (
        data["test_size"],
    ) == class_predictions.shape, "predict should return (N,) shaped array."

    accuracy = accuracy_score(y_test, class_predictions)
    # Majority classifier on this split achieves 0.6293706293706294
    print(data["name"], metric, "accuracy:", accuracy)
    assert (
        data["base_accuracy"] <= accuracy
    ), "predictions should be at least as good as majority class."

    assert isinstance(
        class_probabilities, np.ndarray
    ), "probability predictions should be numpy arrays."
    assert (data["test_size"], data["n_classes"]) == class_probabilities.shape, (
        "predict_proba should return" " (N,K) shaped array."
    )

    # Majority classifier on this split achieves 12.80138131184662
    logloss = log_loss(y_test, class_probabilities)
    print(data["name"], metric, "log-loss:", logloss)
    assert (
        data["base_log_loss"] >= logloss
    ), "predictions should be at least as good as majority class."

    score_to_match = logloss if metric == "neg_log_loss" else accuracy
    assert score_to_match == pytest.approx(gama_score)
    return gama


def test_binary_classification_accuracy():
    """ Binary classification, accuracy, numpy data and ensemble code export """
    gama = _test_dataset_problem(breast_cancer, "accuracy")
    x, y = breast_cancer["load"](return_X_y=True)
    code = gama.export_script(file=None)
    local = {}
    exec(code, {}, local)
    pipeline = local["pipeline"]  # should be defined in exported code
    assert isinstance(pipeline, Pipeline)
    assert isinstance(pipeline.steps[-1][-1], VotingClassifier)
    pipeline.fit(x, y)
    assert 0.9 < pipeline.score(x, y)


def test_binary_classification_accuracy_asha():
    """ Binary classification, accuracy, numpy data, ASHA search. """
    _test_dataset_problem(
        breast_cancer, "accuracy", search=AsynchronousSuccessiveHalving(), max_time=60
    )


def test_binary_classification_accuracy_random_search():
    """ Binary classification, accuracy, numpy data, random search. """
    _test_dataset_problem(breast_cancer, "accuracy", search=RandomSearch())


def test_binary_classification_logloss():
    """ Binary classification, log loss (probabilities), numpy data, ASHA search. """
    _test_dataset_problem(breast_cancer, "neg_log_loss")


def test_multiclass_classification_accuracy():
    """ Multiclass classification, accuracy, numpy data. """
    _test_dataset_problem(wine, "accuracy")


def test_multiclass_classification_logloss():
    """ Multiclass classification, log loss (probabilities), numpy data. """
    _test_dataset_problem(wine, "neg_log_loss")


def test_string_label_classification_accuracy():
    """ Binary classification, accuracy, target is str. """
    _test_dataset_problem(breast_cancer, "accuracy", y_type=str)


def test_string_label_classification_log_loss():
    """ Binary classification, log loss (probabilities), target is str. """
    _test_dataset_problem(breast_cancer, "neg_log_loss", y_type=str)


def test_missing_value_classification_arff():
    """ Binary classification, log loss (probabilities), arff data. """
    _test_dataset_problem(breast_cancer_missing, "neg_log_loss", arff=True)


def test_missing_value_classification():
    """ Binary classification, log loss (probabilities), missing values. """
    _test_dataset_problem(breast_cancer_missing, "neg_log_loss", missing_values=True)
0.910439
0.407717
import clip
import torch
from PIL import Image
from multiprocessing import cpu_count

device = "cuda" if torch.cuda.is_available() else "cpu"
# The jit-traced CLIP model is only used on GPU with the torch version it
# was traced for (1.7.1).
use_jit = torch.cuda.is_available() and '1.7.1' in torch.__version__


class CLIPDataset(torch.utils.data.Dataset):
    """Yields (preprocessed image tensor, tokenized caption) pairs from a
    dataframe with "PATH" (image file) and "TEXT" (caption) columns."""

    def __init__(self, dataframe, preprocess):
        self.dataframe = dataframe
        self.image_transform = preprocess
        self.tokenizer = clip.tokenize

    def __len__(self):
        return len(self.dataframe)

    def __getitem__(self, index):
        row = self.dataframe.iloc[index]
        return (
            self.image_transform(Image.open(row["PATH"])),
            self.tokenizer(str(row["TEXT"]), truncate=True)[0],
        )


class CLIP:
    """Wraps a CLIP model together with pre-encoded text prompts used to
    classify images as NSFW / underaged / animal content."""

    def __init__(self):
        self.model, self.preprocess = clip.load("ViT-B/32", device=device, jit=use_jit)
        self.cosine_similarity = torch.nn.CosineSimilarity(dim=1, eps=1e-6)
        with torch.no_grad():
            # BUG FIX: a missing comma after "animal, bird, mammal, insect"
            # made Python concatenate it with "fashion, clothes" into a single
            # prompt, shifting every later index down by one. df_clipfilter's
            # cutoff (index 19 = first porn prompt) matches the list below.
            self.categories = self.model.encode_text(clip.tokenize(["neutral","selfie", "illustration, drawing", "toys, play, kids, children", "teddy bear, puppet", "animal, bird, mammal, insect", "fashion, clothes", "logo, commercial, ad, advertisement", "drawing, painting","anime, cartoon","comedy, fun","romance, love story","thriller, suspense, crime story","action, action movie", "horror, monster movie", "documentary", "news, journalism", "entertainment", "talk show", "porn, sex, sperm, nipples, breats, tits, boops, penis, dick, cock, clitoris, vagina, fuck, lust, horny, sexual, lick, licking", "porn, sex, sperm, nipples", "porn, sex, sperm, penis, dick, cock", "nipples, breasts, tits, boops, sexy", "penis, dick, cock", "clitoris, vagina", "sex, fuck, lust, horny, sexual, lick, licking", "porn, sex, sexy","sexy, hot","sperm, skin","lust, horny, sexual","lick, licking, body", "anime, hentai, sexy", "cartoon, sexy, sex", "hentai", "anime, sexy, breasts", "hentai"]).to(device))
            self.underaged_categories = self.model.encode_text(clip.tokenize(["teenager, teen", "kid, child, teenager, teen, baby or toddler, underaged, little girl, little boy", "kid, child, little girl, little boy", "baby, toddler","adult, woman, man, grownup, grown person,full-aged of legal age","full-aged, of legal age, adult","woman, man","adult, woman, man, grownup, grown person,full-aged of legal age"]).to(device))
            self.animal_categories = self.model.encode_text(clip.tokenize(["lifeless object, thing", "thing, object", "material", "furniture","wall", "house", "tree", "wood","ground","industry", "table", "bed", "tool", "dress, clothes", "door", "chair", "rock, stone", "human", "man", "woman", "man, woman", "animal","cat","dog", "cow", "pig", "goat", "sheep", "elephant", "horse", "horse, elephant, pig, dog, cat, sheep, goat, animal", "life", "wildlife"]).to(device))

    def similarity_imgalt(self, image_tensor, text_tokens):
        """Encode a batch of images and captions; return the image features
        (as a numpy array) and per-pair cosine similarities."""
        with torch.no_grad():
            image_features = self.model.encode_image(image_tensor.to(device)).float()
            text_features = self.model.encode_text(text_tokens.to(device)).float()
            similarity = self.cosine_similarity(image_features, text_features).tolist()

        image_features = image_features.detach().cpu().numpy()
        return image_features, similarity

    def preprocess_images(self, df):
        """Encode every image in df; return parallel lists of image features
        and image/caption similarities."""
        ret_image_features = []
        ret_similarity = []
        batch_size = 256 if device == "cuda" else 8
        dataset = CLIPDataset(df, self.preprocess)
        # ROBUSTNESS FIX: max(0, ...) guards machines with fewer than 4 cores,
        # where cpu_count() - 3 is negative and DataLoader would raise.
        dataloader = torch.utils.data.DataLoader(
            dataset,
            batch_size=batch_size,
            shuffle=False,
            num_workers=max(0, int(cpu_count() - 3)),
            pin_memory=True,
        )
        for tensors, tokens in dataloader:
            image_features, similarities = self.similarity_imgalt(tensors, tokens)

            ret_image_features.extend(image_features)
            ret_similarity.extend(similarities)
        return ret_image_features, ret_similarity

    def prob(self, image_features, text_features):
        """Return the indices of the two text prompts most similar to the
        given image features."""
        text_features = text_features.float()
        image_features = torch.as_tensor(image_features).to(device, dtype=torch.float32)
        image_features /= image_features.norm(dim=-1, keepdim=True)
        text_features /= text_features.norm(dim=-1, keepdim=True)

        # cosine similarity as logits
        similarity = (100.0 * image_features @ text_features.T).softmax(dim=-1)
        _, indices = similarity.topk(2)
        return indices


clip_filter = CLIP()


def df_clipfilter(df):
    """Tag/drop rows of df by CLIP similarity and category classification.

    Returns (kept image embeddings, filtered dataframe). Rows are dropped
    when the image/caption similarity is too low, when likely underaged
    NSFW content is detected, or when NSFW content depicts an animal.
    """
    sim_threshold = 0.28
    underaged_text = ["teen", "kid", "child", "baby"]

    img_embedding, similarities = clip_filter.preprocess_images(df)
    tmp_embed = []

    df["dropped"] = False

    for i, img_embed in enumerate(img_embedding):
        if similarities[i] < sim_threshold:
            df.at[i, 'dropped'] = True
            continue

        # get most similar categories; indices 0-18 are safe prompts,
        # 19+ are the NSFW prompts (see CLIP.__init__).
        nsfw_prob = clip_filter.prob(img_embed, clip_filter.categories)
        df.at[i, "NSFW"] = "UNSURE"
        df.at[i, "similarity"] = similarities[i]
        if nsfw_prob[0] < 19 and nsfw_prob[1] < 19:
            df.at[i, "NSFW"] = "UNLIKELY"
            tmp_embed.append(img_embed)
            continue
        elif nsfw_prob[0] >= 19 and nsfw_prob[1] >= 19:
            df.at[i, "NSFW"] = "NSFW"

        # Drop NSFW rows that look underaged (indices 0-3 of
        # underaged_categories) or whose caption mentions minors.
        underage_prob = clip_filter.prob(img_embed, clip_filter.underaged_categories)
        if underage_prob[0] < 4 or underage_prob[1] < 4 or any(x in df.at[i, "TEXT"] for x in underaged_text):
            df.at[i, 'dropped'] = True
            continue

        # Drop NSFW rows whose top category is an animal (indices > 20).
        animal_prob = clip_filter.prob(img_embed, clip_filter.animal_categories)
        if animal_prob[0] > 20:
            df.at[i, 'dropped'] = True
            continue
        tmp_embed.append(img_embed)

    df = df[df["dropped"] != True]
    df.reset_index(drop=True, inplace=True)
    return tmp_embed, df


def filter(df, out_fname, output_folder):
    """Run the CLIP filter over df and write its artifacts to output_folder.

    Writes the pre-filter hashes (.clp), the filtered csv, and the kept
    hashes (.hsh); returns (number of kept rows, per-shard counts).
    NOTE: this shadows the builtin `filter`; kept for caller compatibility.
    """
    with open(f"{output_folder}hashes-{out_fname}.clp", "wt") as f:
        for item in df["hash"]:
            f.write(item + "\n")

    results = []
    img_embeddings, dff = df_clipfilter(df)
    dff.to_csv(f"{output_folder}{out_fname}.csv", index=False, sep="|")

    dff.loc[:,"shard"] = dff.PATH.apply(lambda x: x.split("/")[1])
    results = dff["shard"].value_counts()

    with open(f"{output_folder}hashes-{out_fname}.hsh", "wt") as f:
        for item in dff["hash"]:
            f.write(item + "\n")

    return len(dff), results
clip_filter.py
import clip
import torch
from PIL import Image
from multiprocessing import cpu_count

device = "cuda" if torch.cuda.is_available() else "cpu"
# The jit-traced CLIP model is only used on GPU with the torch version it
# was traced for (1.7.1).
use_jit = torch.cuda.is_available() and '1.7.1' in torch.__version__


class CLIPDataset(torch.utils.data.Dataset):
    """Yields (preprocessed image tensor, tokenized caption) pairs from a
    dataframe with "PATH" (image file) and "TEXT" (caption) columns."""

    def __init__(self, dataframe, preprocess):
        self.dataframe = dataframe
        self.image_transform = preprocess
        self.tokenizer = clip.tokenize

    def __len__(self):
        return len(self.dataframe)

    def __getitem__(self, index):
        row = self.dataframe.iloc[index]
        return (
            self.image_transform(Image.open(row["PATH"])),
            self.tokenizer(str(row["TEXT"]), truncate=True)[0],
        )


class CLIP:
    """Wraps a CLIP model together with pre-encoded text prompts used to
    classify images as NSFW / underaged / animal content."""

    def __init__(self):
        self.model, self.preprocess = clip.load("ViT-B/32", device=device, jit=use_jit)
        self.cosine_similarity = torch.nn.CosineSimilarity(dim=1, eps=1e-6)
        with torch.no_grad():
            # BUG FIX: a missing comma after "animal, bird, mammal, insect"
            # made Python concatenate it with "fashion, clothes" into a single
            # prompt, shifting every later index down by one. df_clipfilter's
            # cutoff (index 19 = first porn prompt) matches the list below.
            self.categories = self.model.encode_text(clip.tokenize(["neutral","selfie", "illustration, drawing", "toys, play, kids, children", "teddy bear, puppet", "animal, bird, mammal, insect", "fashion, clothes", "logo, commercial, ad, advertisement", "drawing, painting","anime, cartoon","comedy, fun","romance, love story","thriller, suspense, crime story","action, action movie", "horror, monster movie", "documentary", "news, journalism", "entertainment", "talk show", "porn, sex, sperm, nipples, breats, tits, boops, penis, dick, cock, clitoris, vagina, fuck, lust, horny, sexual, lick, licking", "porn, sex, sperm, nipples", "porn, sex, sperm, penis, dick, cock", "nipples, breasts, tits, boops, sexy", "penis, dick, cock", "clitoris, vagina", "sex, fuck, lust, horny, sexual, lick, licking", "porn, sex, sexy","sexy, hot","sperm, skin","lust, horny, sexual","lick, licking, body", "anime, hentai, sexy", "cartoon, sexy, sex", "hentai", "anime, sexy, breasts", "hentai"]).to(device))
            self.underaged_categories = self.model.encode_text(clip.tokenize(["teenager, teen", "kid, child, teenager, teen, baby or toddler, underaged, little girl, little boy", "kid, child, little girl, little boy", "baby, toddler","adult, woman, man, grownup, grown person,full-aged of legal age","full-aged, of legal age, adult","woman, man","adult, woman, man, grownup, grown person,full-aged of legal age"]).to(device))
            self.animal_categories = self.model.encode_text(clip.tokenize(["lifeless object, thing", "thing, object", "material", "furniture","wall", "house", "tree", "wood","ground","industry", "table", "bed", "tool", "dress, clothes", "door", "chair", "rock, stone", "human", "man", "woman", "man, woman", "animal","cat","dog", "cow", "pig", "goat", "sheep", "elephant", "horse", "horse, elephant, pig, dog, cat, sheep, goat, animal", "life", "wildlife"]).to(device))

    def similarity_imgalt(self, image_tensor, text_tokens):
        """Encode a batch of images and captions; return the image features
        (as a numpy array) and per-pair cosine similarities."""
        with torch.no_grad():
            image_features = self.model.encode_image(image_tensor.to(device)).float()
            text_features = self.model.encode_text(text_tokens.to(device)).float()
            similarity = self.cosine_similarity(image_features, text_features).tolist()

        image_features = image_features.detach().cpu().numpy()
        return image_features, similarity

    def preprocess_images(self, df):
        """Encode every image in df; return parallel lists of image features
        and image/caption similarities."""
        ret_image_features = []
        ret_similarity = []
        batch_size = 256 if device == "cuda" else 8
        dataset = CLIPDataset(df, self.preprocess)
        # ROBUSTNESS FIX: max(0, ...) guards machines with fewer than 4 cores,
        # where cpu_count() - 3 is negative and DataLoader would raise.
        dataloader = torch.utils.data.DataLoader(
            dataset,
            batch_size=batch_size,
            shuffle=False,
            num_workers=max(0, int(cpu_count() - 3)),
            pin_memory=True,
        )
        for tensors, tokens in dataloader:
            image_features, similarities = self.similarity_imgalt(tensors, tokens)

            ret_image_features.extend(image_features)
            ret_similarity.extend(similarities)
        return ret_image_features, ret_similarity

    def prob(self, image_features, text_features):
        """Return the indices of the two text prompts most similar to the
        given image features."""
        text_features = text_features.float()
        image_features = torch.as_tensor(image_features).to(device, dtype=torch.float32)
        image_features /= image_features.norm(dim=-1, keepdim=True)
        text_features /= text_features.norm(dim=-1, keepdim=True)

        # cosine similarity as logits
        similarity = (100.0 * image_features @ text_features.T).softmax(dim=-1)
        _, indices = similarity.topk(2)
        return indices


clip_filter = CLIP()


def df_clipfilter(df):
    """Tag/drop rows of df by CLIP similarity and category classification.

    Returns (kept image embeddings, filtered dataframe). Rows are dropped
    when the image/caption similarity is too low, when likely underaged
    NSFW content is detected, or when NSFW content depicts an animal.
    """
    sim_threshold = 0.28
    underaged_text = ["teen", "kid", "child", "baby"]

    img_embedding, similarities = clip_filter.preprocess_images(df)
    tmp_embed = []

    df["dropped"] = False

    for i, img_embed in enumerate(img_embedding):
        if similarities[i] < sim_threshold:
            df.at[i, 'dropped'] = True
            continue

        # get most similar categories; indices 0-18 are safe prompts,
        # 19+ are the NSFW prompts (see CLIP.__init__).
        nsfw_prob = clip_filter.prob(img_embed, clip_filter.categories)
        df.at[i, "NSFW"] = "UNSURE"
        df.at[i, "similarity"] = similarities[i]
        if nsfw_prob[0] < 19 and nsfw_prob[1] < 19:
            df.at[i, "NSFW"] = "UNLIKELY"
            tmp_embed.append(img_embed)
            continue
        elif nsfw_prob[0] >= 19 and nsfw_prob[1] >= 19:
            df.at[i, "NSFW"] = "NSFW"

        # Drop NSFW rows that look underaged (indices 0-3 of
        # underaged_categories) or whose caption mentions minors.
        underage_prob = clip_filter.prob(img_embed, clip_filter.underaged_categories)
        if underage_prob[0] < 4 or underage_prob[1] < 4 or any(x in df.at[i, "TEXT"] for x in underaged_text):
            df.at[i, 'dropped'] = True
            continue

        # Drop NSFW rows whose top category is an animal (indices > 20).
        animal_prob = clip_filter.prob(img_embed, clip_filter.animal_categories)
        if animal_prob[0] > 20:
            df.at[i, 'dropped'] = True
            continue
        tmp_embed.append(img_embed)

    df = df[df["dropped"] != True]
    df.reset_index(drop=True, inplace=True)
    return tmp_embed, df


def filter(df, out_fname, output_folder):
    """Run the CLIP filter over df and write its artifacts to output_folder.

    Writes the pre-filter hashes (.clp), the filtered csv, and the kept
    hashes (.hsh); returns (number of kept rows, per-shard counts).
    NOTE: this shadows the builtin `filter`; kept for caller compatibility.
    """
    with open(f"{output_folder}hashes-{out_fname}.clp", "wt") as f:
        for item in df["hash"]:
            f.write(item + "\n")

    results = []
    img_embeddings, dff = df_clipfilter(df)
    dff.to_csv(f"{output_folder}{out_fname}.csv", index=False, sep="|")

    dff.loc[:,"shard"] = dff.PATH.apply(lambda x: x.split("/")[1])
    results = dff["shard"].value_counts()

    with open(f"{output_folder}hashes-{out_fname}.hsh", "wt") as f:
        for item in dff["hash"]:
            f.write(item + "\n")

    return len(dff), results
0.591605
0.275191
import warnings import numpy as np from ...surface import _normalize_vectors from ...utils import _import_mlab, _validate_type class _Projection(object): """Class storing projection information. Attributes ---------- xy : array Result of 2d projection of 3d data. pts : Source Mayavi source handle. """ def __init__(self, xy=None, pts=None): """Store input projection information into attributes.""" self.xy = xy self.pts = pts def visible(self, state): """Modify visibility attribute of the source.""" self.pts.visible = state class _Renderer(object): """Class managing rendering scene. Attributes ---------- mlab: mayavi.mlab Main Mayavi access point. fig: mlab.Figure Mayavi scene handle. """ def __init__(self, fig=None, size=(600, 600), bgcolor=(0., 0., 0.), name=None, show=False): """Set up the scene. Parameters ---------- fig: instance of mayavi.mlab.figure Scene handle. size : tuple The dimensions of the context window: (width, height). bgcolor: tuple The color definition of the background: (red, green, blue). name: str | None The name of the scene. """ self.mlab = _import_mlab() if fig is None: self.fig = _mlab_figure(figure=name, bgcolor=bgcolor, size=size) else: self.fig = fig if show is False: _toggle_mlab_render(self.fig, False) def scene(self): """Return scene handle.""" return self.fig def set_interactive(self): """Enable interactive mode.""" from tvtk.api import tvtk if self.fig.scene is not None: self.fig.scene.interactor.interactor_style = \ tvtk.InteractorStyleTerrain() def mesh(self, x, y, z, triangles, color, opacity=1.0, shading=False, backface_culling=False, **kwargs): """Add a mesh in the scene. Parameters ---------- x: array, shape (n_vertices,) The array containing the X component of the vertices. y: array, shape (n_vertices,) The array containing the Y component of the vertices. z: array, shape (n_vertices,) The array containing the Z component of the vertices. 
triangles: array, shape (n_polygons, 3) The array containing the indices of the polygons. color: tuple The color of the mesh: (red, green, blue). opacity: float The opacity of the mesh. shading: bool If True, enable the mesh shading. backface_culling: bool If True, enable backface culling on the mesh. kwargs: args The arguments to pass to triangular_mesh """ surface = self.mlab.triangular_mesh(x, y, z, triangles, color=color, opacity=opacity, figure=self.fig, **kwargs) surface.actor.property.shading = shading surface.actor.property.backface_culling = backface_culling return surface def contour(self, surface, scalars, contours, line_width=1.0, opacity=1.0, vmin=None, vmax=None, colormap=None): """Add a contour in the scene. Parameters ---------- surface: surface object The mesh to use as support for contour. scalars: ndarray, shape (n_vertices,) The scalar valued associated to the vertices. contours: int | list Specifying a list of values will only give the requested contours. line_width: float The width of the lines. opacity: float The opacity of the contour. vmin: float | None vmin is used to scale the colormap. If None, the min of the data will be used vmax: float | None vmax is used to scale the colormap. If None, the max of the data will be used colormap: The colormap to use. """ mesh = _create_mesh_surf(surface, self.fig, scalars=scalars) cont = self.mlab.pipeline.contour_surface( mesh, contours=contours, line_width=1.0, vmin=vmin, vmax=vmax, opacity=opacity, figure=self.fig) cont.module_manager.scalar_lut_manager.lut.table = colormap def surface(self, surface, color=None, opacity=1.0, vmin=None, vmax=None, colormap=None, scalars=None, backface_culling=False): """Add a surface in the scene. Parameters ---------- surface: surface object The information describing the surface. color: tuple The color of the surface: (red, green, blue). opacity: float The opacity of the surface. vmin: float | None vmin is used to scale the colormap. 
If None, the min of the data will be used vmax: float | None vmax is used to scale the colormap. If None, the max of the data will be used colormap: The colormap to use. backface_culling: bool If True, enable backface culling on the surface. """ # Make a solid surface mesh = _create_mesh_surf(surface, self.fig, scalars=scalars) surface = self.mlab.pipeline.surface( mesh, color=color, opacity=opacity, vmin=vmin, vmax=vmax, figure=self.fig) if colormap is not None: surface.module_manager.scalar_lut_manager.lut.table = colormap surface.actor.property.backface_culling = backface_culling def sphere(self, center, color, scale, opacity=1.0, backface_culling=False): """Add sphere in the scene. Parameters ---------- center: ndarray, shape(n_center, 3) The list of centers to use for the sphere(s). color: tuple The color of the sphere(s): (red, green, blue). scale: float The scale of the sphere(s). opacity: float The opacity of the sphere(s). backface_culling: bool If True, enable backface culling on the sphere(s). """ if center.ndim == 1: x = center[0] y = center[1] z = center[2] elif center.ndim == 2: x = center[:, 0] y = center[:, 1] z = center[:, 2] surface = self.mlab.points3d(x, y, z, color=color, scale_factor=scale, opacity=opacity, figure=self.fig) surface.actor.property.backface_culling = backface_culling def quiver3d(self, x, y, z, u, v, w, color, scale, mode, resolution=8, glyph_height=None, glyph_center=None, glyph_resolution=None, opacity=1.0, scale_mode='none', scalars=None, backface_culling=False): """Add quiver3d in the scene. Parameters ---------- x: array, shape (n_quivers,) The X component of the position of the quiver. y: array, shape (n_quivers,) The Y component of the position of the quiver. z: array, shape (n_quivers,) The Z component of the position of the quiver. u: array, shape (n_quivers,) The last X component of the quiver. v: array, shape (n_quivers,) The last Y component of the quiver. 
w: array, shape (n_quivers,) The last Z component of the quiver. color: tuple The color of the quiver: (red, green, blue). scale: float The scale of the quiver. mode: 'arrow' or 'cylinder' The type of the quiver. resolution: float The resolution of the arrow. glyph_height: float The height of the glyph used with the quiver. glyph_center: tuple The center of the glyph used with the quiver: (x, y, z). glyph_resolution: float The resolution of the glyph used with the quiver. opacity: float The opacity of the quiver. scale_mode: 'vector', 'scalar' or 'none' The scaling mode for the glyph. scalars: array, shape (n_quivers,) | None The optional scalar data to use. backface_culling: bool If True, enable backface culling on the quiver. """ if mode == 'arrow': self.mlab.quiver3d(x, y, z, u, v, w, mode=mode, color=color, scale_factor=scale, scale_mode=scale_mode, resolution=resolution, scalars=scalars, opacity=opacity, figure=self.fig) elif mode == 'cylinder': quiv = self.mlab.quiver3d(x, y, z, u, v, w, mode=mode, color=color, scale_factor=scale, opacity=opacity, figure=self.fig) quiv.glyph.glyph_source.glyph_source.height = glyph_height quiv.glyph.glyph_source.glyph_source.center = glyph_center quiv.glyph.glyph_source.glyph_source.resolution = glyph_resolution quiv.actor.property.backface_culling = backface_culling def text(self, x, y, text, width): """Add test in the scene. Parameters ---------- x: float The X component to use as position of the text. y: float The Y component to use as position of the text. text: str The content of the text. width: float The width of the text. """ self.mlab.text(x, y, text, width=width, figure=self.fig) def show(self): """Render the scene.""" _toggle_mlab_render(self.fig, True) def set_camera(self, azimuth=None, elevation=None, distance=None, focalpoint=None): """Configure the camera of the scene. Parameters ---------- azimuth: float The azimuthal angle of the camera. elevation: float The zenith angle of the camera. 
distance: float The distance to the focal point. focalpoint: tuple The focal point of the camera: (x, y, z). """ self.mlab.view(azimuth, elevation, distance, focalpoint=focalpoint, figure=self.fig) def screenshot(self): """Take a screenshot of the scene.""" return self.mlab.screenshot(self.fig) def project(self, xyz, ch_names): """Convert 3d points to a 2d perspective. Parameters ---------- xyz: array, shape(n_points, 3) The points to project. ch_names: array, shape(_n_points,) Names of the channels. """ xy = _3d_to_2d(self.fig, xyz) xy = dict(zip(ch_names, xy)) pts = self.fig.children[-1] return _Projection(xy=xy, pts=pts) def _mlab_figure(**kwargs): """Create a Mayavi figure using our defaults.""" from mayavi import mlab fig = mlab.figure(**kwargs) # If using modern VTK/Mayavi, improve rendering with FXAA if hasattr(getattr(fig.scene, 'renderer', None), 'use_fxaa'): fig.scene.renderer.use_fxaa = True return fig def _toggle_mlab_render(fig, render): mlab = _import_mlab() if mlab.options.backend != 'test': fig.scene.disable_render = not render def _create_mesh_surf(surf, fig=None, scalars=None, vtk_normals=True): """Create Mayavi mesh from MNE surf.""" mlab = _import_mlab() x, y, z = surf['rr'].T with warnings.catch_warnings(record=True): # traits mesh = mlab.pipeline.triangular_mesh_source( x, y, z, surf['tris'], scalars=scalars, figure=fig) if vtk_normals: mesh = mlab.pipeline.poly_data_normals(mesh) mesh.filter.compute_cell_normals = False mesh.filter.consistency = False mesh.filter.non_manifold_traversal = False mesh.filter.splitting = False else: # make absolutely sure these are normalized for Mayavi nn = surf['nn'].copy() _normalize_vectors(nn) mesh.data.point_data.normals = nn mesh.data.cell_data.normals = None return mesh def _3d_to_2d(fig, xyz): """Convert 3d points to a 2d perspective using a Mayavi Scene.""" from mayavi.core.scene import Scene _validate_type(fig, Scene, "fig", "Scene") xyz = np.column_stack([xyz, np.ones(xyz.shape[0])]) # Transform 
points into 'unnormalized' view coordinates comb_trans_mat = _get_world_to_view_matrix(fig.scene) view_coords = np.dot(comb_trans_mat, xyz.T).T # Divide through by the fourth element for normalized view coords norm_view_coords = view_coords / (view_coords[:, 3].reshape(-1, 1)) # Transform from normalized view coordinates to display coordinates. view_to_disp_mat = _get_view_to_display_matrix(fig.scene) xy = np.dot(view_to_disp_mat, norm_view_coords.T).T # Pull the first two columns since they're meaningful for 2d plotting xy = xy[:, :2] return xy def _get_world_to_view_matrix(scene): """Return the 4x4 matrix to transform xyz space to the current view. This is a concatenation of the model view and perspective transforms. """ from mayavi.core.ui.mayavi_scene import MayaviScene from tvtk.pyface.tvtk_scene import TVTKScene _validate_type(scene, (MayaviScene, TVTKScene), "scene", "TVTKScene/MayaviScene") cam = scene.camera # The VTK method needs the aspect ratio and near and far # clipping planes in order to return the proper transform. scene_size = tuple(scene.get_size()) clip_range = cam.clipping_range aspect_ratio = float(scene_size[0]) / scene_size[1] # Get the vtk matrix object using the aspect ratio we defined vtk_comb_trans_mat = cam.get_composite_projection_transform_matrix( aspect_ratio, clip_range[0], clip_range[1]) vtk_comb_trans_mat = vtk_comb_trans_mat.to_array() return vtk_comb_trans_mat def _get_view_to_display_matrix(scene): """Return the 4x4 matrix to convert view coordinates to display coordinates. It's assumed that the view should take up the entire window and that the origin of the window is in the upper left corner. 
""" # noqa: E501 from mayavi.core.ui.mayavi_scene import MayaviScene from tvtk.pyface.tvtk_scene import TVTKScene _validate_type(scene, (MayaviScene, TVTKScene), "scene", "TVTKScene/MayaviScene") # normalized view coordinates have the origin in the middle of the space # so we need to scale by width and height of the display window and shift # by half width and half height. The matrix accomplishes that. x, y = tuple(scene.get_size()) view_to_disp_mat = np.array([[x / 2.0, 0., 0., x / 2.0], [0., -y / 2.0, 0., y / 2.0], [0., 0., 1., 0.], [0., 0., 0., 1.]]) return view_to_disp_mat
mne/viz/backends/_pysurfer_mayavi.py
import warnings import numpy as np from ...surface import _normalize_vectors from ...utils import _import_mlab, _validate_type class _Projection(object): """Class storing projection information. Attributes ---------- xy : array Result of 2d projection of 3d data. pts : Source Mayavi source handle. """ def __init__(self, xy=None, pts=None): """Store input projection information into attributes.""" self.xy = xy self.pts = pts def visible(self, state): """Modify visibility attribute of the source.""" self.pts.visible = state class _Renderer(object): """Class managing rendering scene. Attributes ---------- mlab: mayavi.mlab Main Mayavi access point. fig: mlab.Figure Mayavi scene handle. """ def __init__(self, fig=None, size=(600, 600), bgcolor=(0., 0., 0.), name=None, show=False): """Set up the scene. Parameters ---------- fig: instance of mayavi.mlab.figure Scene handle. size : tuple The dimensions of the context window: (width, height). bgcolor: tuple The color definition of the background: (red, green, blue). name: str | None The name of the scene. """ self.mlab = _import_mlab() if fig is None: self.fig = _mlab_figure(figure=name, bgcolor=bgcolor, size=size) else: self.fig = fig if show is False: _toggle_mlab_render(self.fig, False) def scene(self): """Return scene handle.""" return self.fig def set_interactive(self): """Enable interactive mode.""" from tvtk.api import tvtk if self.fig.scene is not None: self.fig.scene.interactor.interactor_style = \ tvtk.InteractorStyleTerrain() def mesh(self, x, y, z, triangles, color, opacity=1.0, shading=False, backface_culling=False, **kwargs): """Add a mesh in the scene. Parameters ---------- x: array, shape (n_vertices,) The array containing the X component of the vertices. y: array, shape (n_vertices,) The array containing the Y component of the vertices. z: array, shape (n_vertices,) The array containing the Z component of the vertices. 
triangles: array, shape (n_polygons, 3) The array containing the indices of the polygons. color: tuple The color of the mesh: (red, green, blue). opacity: float The opacity of the mesh. shading: bool If True, enable the mesh shading. backface_culling: bool If True, enable backface culling on the mesh. kwargs: args The arguments to pass to triangular_mesh """ surface = self.mlab.triangular_mesh(x, y, z, triangles, color=color, opacity=opacity, figure=self.fig, **kwargs) surface.actor.property.shading = shading surface.actor.property.backface_culling = backface_culling return surface def contour(self, surface, scalars, contours, line_width=1.0, opacity=1.0, vmin=None, vmax=None, colormap=None): """Add a contour in the scene. Parameters ---------- surface: surface object The mesh to use as support for contour. scalars: ndarray, shape (n_vertices,) The scalar valued associated to the vertices. contours: int | list Specifying a list of values will only give the requested contours. line_width: float The width of the lines. opacity: float The opacity of the contour. vmin: float | None vmin is used to scale the colormap. If None, the min of the data will be used vmax: float | None vmax is used to scale the colormap. If None, the max of the data will be used colormap: The colormap to use. """ mesh = _create_mesh_surf(surface, self.fig, scalars=scalars) cont = self.mlab.pipeline.contour_surface( mesh, contours=contours, line_width=1.0, vmin=vmin, vmax=vmax, opacity=opacity, figure=self.fig) cont.module_manager.scalar_lut_manager.lut.table = colormap def surface(self, surface, color=None, opacity=1.0, vmin=None, vmax=None, colormap=None, scalars=None, backface_culling=False): """Add a surface in the scene. Parameters ---------- surface: surface object The information describing the surface. color: tuple The color of the surface: (red, green, blue). opacity: float The opacity of the surface. vmin: float | None vmin is used to scale the colormap. 
If None, the min of the data will be used vmax: float | None vmax is used to scale the colormap. If None, the max of the data will be used colormap: The colormap to use. backface_culling: bool If True, enable backface culling on the surface. """ # Make a solid surface mesh = _create_mesh_surf(surface, self.fig, scalars=scalars) surface = self.mlab.pipeline.surface( mesh, color=color, opacity=opacity, vmin=vmin, vmax=vmax, figure=self.fig) if colormap is not None: surface.module_manager.scalar_lut_manager.lut.table = colormap surface.actor.property.backface_culling = backface_culling def sphere(self, center, color, scale, opacity=1.0, backface_culling=False): """Add sphere in the scene. Parameters ---------- center: ndarray, shape(n_center, 3) The list of centers to use for the sphere(s). color: tuple The color of the sphere(s): (red, green, blue). scale: float The scale of the sphere(s). opacity: float The opacity of the sphere(s). backface_culling: bool If True, enable backface culling on the sphere(s). """ if center.ndim == 1: x = center[0] y = center[1] z = center[2] elif center.ndim == 2: x = center[:, 0] y = center[:, 1] z = center[:, 2] surface = self.mlab.points3d(x, y, z, color=color, scale_factor=scale, opacity=opacity, figure=self.fig) surface.actor.property.backface_culling = backface_culling def quiver3d(self, x, y, z, u, v, w, color, scale, mode, resolution=8, glyph_height=None, glyph_center=None, glyph_resolution=None, opacity=1.0, scale_mode='none', scalars=None, backface_culling=False): """Add quiver3d in the scene. Parameters ---------- x: array, shape (n_quivers,) The X component of the position of the quiver. y: array, shape (n_quivers,) The Y component of the position of the quiver. z: array, shape (n_quivers,) The Z component of the position of the quiver. u: array, shape (n_quivers,) The last X component of the quiver. v: array, shape (n_quivers,) The last Y component of the quiver. 
w: array, shape (n_quivers,) The last Z component of the quiver. color: tuple The color of the quiver: (red, green, blue). scale: float The scale of the quiver. mode: 'arrow' or 'cylinder' The type of the quiver. resolution: float The resolution of the arrow. glyph_height: float The height of the glyph used with the quiver. glyph_center: tuple The center of the glyph used with the quiver: (x, y, z). glyph_resolution: float The resolution of the glyph used with the quiver. opacity: float The opacity of the quiver. scale_mode: 'vector', 'scalar' or 'none' The scaling mode for the glyph. scalars: array, shape (n_quivers,) | None The optional scalar data to use. backface_culling: bool If True, enable backface culling on the quiver. """ if mode == 'arrow': self.mlab.quiver3d(x, y, z, u, v, w, mode=mode, color=color, scale_factor=scale, scale_mode=scale_mode, resolution=resolution, scalars=scalars, opacity=opacity, figure=self.fig) elif mode == 'cylinder': quiv = self.mlab.quiver3d(x, y, z, u, v, w, mode=mode, color=color, scale_factor=scale, opacity=opacity, figure=self.fig) quiv.glyph.glyph_source.glyph_source.height = glyph_height quiv.glyph.glyph_source.glyph_source.center = glyph_center quiv.glyph.glyph_source.glyph_source.resolution = glyph_resolution quiv.actor.property.backface_culling = backface_culling def text(self, x, y, text, width): """Add test in the scene. Parameters ---------- x: float The X component to use as position of the text. y: float The Y component to use as position of the text. text: str The content of the text. width: float The width of the text. """ self.mlab.text(x, y, text, width=width, figure=self.fig) def show(self): """Render the scene.""" _toggle_mlab_render(self.fig, True) def set_camera(self, azimuth=None, elevation=None, distance=None, focalpoint=None): """Configure the camera of the scene. Parameters ---------- azimuth: float The azimuthal angle of the camera. elevation: float The zenith angle of the camera. 
distance: float The distance to the focal point. focalpoint: tuple The focal point of the camera: (x, y, z). """ self.mlab.view(azimuth, elevation, distance, focalpoint=focalpoint, figure=self.fig) def screenshot(self): """Take a screenshot of the scene.""" return self.mlab.screenshot(self.fig) def project(self, xyz, ch_names): """Convert 3d points to a 2d perspective. Parameters ---------- xyz: array, shape(n_points, 3) The points to project. ch_names: array, shape(_n_points,) Names of the channels. """ xy = _3d_to_2d(self.fig, xyz) xy = dict(zip(ch_names, xy)) pts = self.fig.children[-1] return _Projection(xy=xy, pts=pts) def _mlab_figure(**kwargs): """Create a Mayavi figure using our defaults.""" from mayavi import mlab fig = mlab.figure(**kwargs) # If using modern VTK/Mayavi, improve rendering with FXAA if hasattr(getattr(fig.scene, 'renderer', None), 'use_fxaa'): fig.scene.renderer.use_fxaa = True return fig def _toggle_mlab_render(fig, render): mlab = _import_mlab() if mlab.options.backend != 'test': fig.scene.disable_render = not render def _create_mesh_surf(surf, fig=None, scalars=None, vtk_normals=True): """Create Mayavi mesh from MNE surf.""" mlab = _import_mlab() x, y, z = surf['rr'].T with warnings.catch_warnings(record=True): # traits mesh = mlab.pipeline.triangular_mesh_source( x, y, z, surf['tris'], scalars=scalars, figure=fig) if vtk_normals: mesh = mlab.pipeline.poly_data_normals(mesh) mesh.filter.compute_cell_normals = False mesh.filter.consistency = False mesh.filter.non_manifold_traversal = False mesh.filter.splitting = False else: # make absolutely sure these are normalized for Mayavi nn = surf['nn'].copy() _normalize_vectors(nn) mesh.data.point_data.normals = nn mesh.data.cell_data.normals = None return mesh def _3d_to_2d(fig, xyz): """Convert 3d points to a 2d perspective using a Mayavi Scene.""" from mayavi.core.scene import Scene _validate_type(fig, Scene, "fig", "Scene") xyz = np.column_stack([xyz, np.ones(xyz.shape[0])]) # Transform 
points into 'unnormalized' view coordinates comb_trans_mat = _get_world_to_view_matrix(fig.scene) view_coords = np.dot(comb_trans_mat, xyz.T).T # Divide through by the fourth element for normalized view coords norm_view_coords = view_coords / (view_coords[:, 3].reshape(-1, 1)) # Transform from normalized view coordinates to display coordinates. view_to_disp_mat = _get_view_to_display_matrix(fig.scene) xy = np.dot(view_to_disp_mat, norm_view_coords.T).T # Pull the first two columns since they're meaningful for 2d plotting xy = xy[:, :2] return xy def _get_world_to_view_matrix(scene): """Return the 4x4 matrix to transform xyz space to the current view. This is a concatenation of the model view and perspective transforms. """ from mayavi.core.ui.mayavi_scene import MayaviScene from tvtk.pyface.tvtk_scene import TVTKScene _validate_type(scene, (MayaviScene, TVTKScene), "scene", "TVTKScene/MayaviScene") cam = scene.camera # The VTK method needs the aspect ratio and near and far # clipping planes in order to return the proper transform. scene_size = tuple(scene.get_size()) clip_range = cam.clipping_range aspect_ratio = float(scene_size[0]) / scene_size[1] # Get the vtk matrix object using the aspect ratio we defined vtk_comb_trans_mat = cam.get_composite_projection_transform_matrix( aspect_ratio, clip_range[0], clip_range[1]) vtk_comb_trans_mat = vtk_comb_trans_mat.to_array() return vtk_comb_trans_mat def _get_view_to_display_matrix(scene): """Return the 4x4 matrix to convert view coordinates to display coordinates. It's assumed that the view should take up the entire window and that the origin of the window is in the upper left corner. 
""" # noqa: E501 from mayavi.core.ui.mayavi_scene import MayaviScene from tvtk.pyface.tvtk_scene import TVTKScene _validate_type(scene, (MayaviScene, TVTKScene), "scene", "TVTKScene/MayaviScene") # normalized view coordinates have the origin in the middle of the space # so we need to scale by width and height of the display window and shift # by half width and half height. The matrix accomplishes that. x, y = tuple(scene.get_size()) view_to_disp_mat = np.array([[x / 2.0, 0., 0., x / 2.0], [0., -y / 2.0, 0., y / 2.0], [0., 0., 1., 0.], [0., 0., 0., 1.]]) return view_to_disp_mat
0.939941
0.552992
import os import sys file_dir = os.path.dirname(__file__) sys.path.append(file_dir) from http.server import BaseHTTPRequestHandler, HTTPServer from intercom.client import Client import generic import sys import gspread from oauth2client.service_account import ServiceAccountCredentials import string import webbrowser class HTTPServer_Intercom(BaseHTTPRequestHandler): def _set_headers(self): self.send_response(200) self.send_header('Content-type', 'text') self.end_headers() def do_GET(self): self._set_headers() # Google Spreadsheets Credentials scope = ['http://spreadsheets.google.com/feeds'] creds = ServiceAccountCredentials.from_json_keyfile_name(sys.path[0] + "/src/client_secret.json", scope) client = gspread.authorize(creds) # Intercom Access Token # the variable intercom holds the Intercom Access Token but it is removed for privacy reason. # Reset the Applicants in a worksheet of a Google Spreadsheet document spreadsheet_title = input("Title of your spreadsheet (e.g. Fall 2018 Regular Decision Challenge QA): ") worksheet_title = input("Title of your worksheet: ") admin_name = input("What's your Intercom name? 
") sheet = client.open(spreadsheet_title).worksheet(worksheet_title) result = sheet.get_all_records() for row, applicant in enumerate(result): if applicant.get("Admin Action Taken", "None") != "None" and applicant.get("Emailed?", "Yes") in ( "No", "no"): # reset each applicant applicant_email = applicant.get("Email") try: user = intercom.users.find(email=applicant_email) reason = applicant.get("Reason to Reset") actions = applicant.get("Admin Action Taken", "").split(" ") translator = str.maketrans('', '', string.punctuation) actions = [action.translate(translator) for action in actions] to_reset = { "Understanding": False, "Writing": False, "Math": False, "Creativity": False, "Reasoning": False } for action in actions: if action in to_reset: to_reset[action] = True for challenge in to_reset: if to_reset[challenge] is True: msg = generic.GenericChallengeReset(admin=admin_name, challenge=challenge, reason=reason) subject, body = msg.response()["subject"], msg.response()["body"] email = intercom.messages.create(**{ "message_type": "email", "subject": subject, "body": body, "template": "plain", "from": { "type": "admin", "id": "1482390" }, "to": { "type": "user", "id": user.id } }) sheet.update_acell('K{}'.format(row+2), 'Yes') print("{} is notified about the reset.".format(applicant_email)) except: message = sys.exc_info()[1] print(message) print("Please make sure that the email \"{}\" is registered on Intercom".format(email)) def do_HEAD(self): self._set_headers() def do_POST(self): # Doesn't do anything with posted data pass def run_server(): print("Starting server http://127.0.0.1:8080/...") server_address = ('127.0.0.1', 8080) httpd = HTTPServer(server_address, HTTPServer_Intercom) print("Running server http://127.0.0.1:8080/...") webbrowser.open("http://127.0.0.1:8080/") httpd.serve_forever()
src/server.py
import os import sys file_dir = os.path.dirname(__file__) sys.path.append(file_dir) from http.server import BaseHTTPRequestHandler, HTTPServer from intercom.client import Client import generic import sys import gspread from oauth2client.service_account import ServiceAccountCredentials import string import webbrowser class HTTPServer_Intercom(BaseHTTPRequestHandler): def _set_headers(self): self.send_response(200) self.send_header('Content-type', 'text') self.end_headers() def do_GET(self): self._set_headers() # Google Spreadsheets Credentials scope = ['http://spreadsheets.google.com/feeds'] creds = ServiceAccountCredentials.from_json_keyfile_name(sys.path[0] + "/src/client_secret.json", scope) client = gspread.authorize(creds) # Intercom Access Token # the variable intercom holds the Intercom Access Token but it is removed for privacy reason. # Reset the Applicants in a worksheet of a Google Spreadsheet document spreadsheet_title = input("Title of your spreadsheet (e.g. Fall 2018 Regular Decision Challenge QA): ") worksheet_title = input("Title of your worksheet: ") admin_name = input("What's your Intercom name? 
") sheet = client.open(spreadsheet_title).worksheet(worksheet_title) result = sheet.get_all_records() for row, applicant in enumerate(result): if applicant.get("Admin Action Taken", "None") != "None" and applicant.get("Emailed?", "Yes") in ( "No", "no"): # reset each applicant applicant_email = applicant.get("Email") try: user = intercom.users.find(email=applicant_email) reason = applicant.get("Reason to Reset") actions = applicant.get("Admin Action Taken", "").split(" ") translator = str.maketrans('', '', string.punctuation) actions = [action.translate(translator) for action in actions] to_reset = { "Understanding": False, "Writing": False, "Math": False, "Creativity": False, "Reasoning": False } for action in actions: if action in to_reset: to_reset[action] = True for challenge in to_reset: if to_reset[challenge] is True: msg = generic.GenericChallengeReset(admin=admin_name, challenge=challenge, reason=reason) subject, body = msg.response()["subject"], msg.response()["body"] email = intercom.messages.create(**{ "message_type": "email", "subject": subject, "body": body, "template": "plain", "from": { "type": "admin", "id": "1482390" }, "to": { "type": "user", "id": user.id } }) sheet.update_acell('K{}'.format(row+2), 'Yes') print("{} is notified about the reset.".format(applicant_email)) except: message = sys.exc_info()[1] print(message) print("Please make sure that the email \"{}\" is registered on Intercom".format(email)) def do_HEAD(self): self._set_headers() def do_POST(self): # Doesn't do anything with posted data pass def run_server(): print("Starting server http://127.0.0.1:8080/...") server_address = ('127.0.0.1', 8080) httpd = HTTPServer(server_address, HTTPServer_Intercom) print("Running server http://127.0.0.1:8080/...") webbrowser.open("http://127.0.0.1:8080/") httpd.serve_forever()
0.119395
0.116337
from __future__ import annotations import logging from dataclasses import dataclass from pathlib import Path from typing import Any, ClassVar, Iterable, cast from pants.base.build_environment import get_buildroot from pants.base.build_root import BuildRoot from pants.base.exiter import PANTS_SUCCEEDED_EXIT_CODE from pants.base.specs import Specs from pants.bsp.protocol import BSPHandlerMapping from pants.build_graph.build_configuration import BuildConfiguration from pants.core.util_rules import system_binaries from pants.engine import desktop, environment, fs, platform, process from pants.engine.console import Console from pants.engine.fs import PathGlobs, Snapshot, Workspace from pants.engine.goal import Goal from pants.engine.internals import build_files, graph, options_parsing from pants.engine.internals.native_engine import PyExecutor, PySessionCancellationLatch from pants.engine.internals.parser import Parser from pants.engine.internals.scheduler import Scheduler, SchedulerSession from pants.engine.internals.selectors import Params from pants.engine.internals.session import SessionValues from pants.engine.rules import QueryRule, collect_rules, rule from pants.engine.streaming_workunit_handler import rules as streaming_workunit_handler_rules from pants.engine.target import RegisteredTargetTypes from pants.engine.unions import UnionMembership, UnionRule from pants.init import specs_calculator from pants.option.global_options import ( DEFAULT_EXECUTION_OPTIONS, DynamicRemoteOptions, ExecutionOptions, GlobalOptions, LocalStoreOptions, ) from pants.option.option_value_container import OptionValueContainer from pants.option.subsystem import Subsystem from pants.util.logging import LogLevel from pants.util.ordered_set import FrozenOrderedSet from pants.vcs.changed import rules as changed_rules from pants.vcs.git import rules as git_rules logger = logging.getLogger(__name__) @dataclass(frozen=True) class GraphScheduler: """A thin wrapper around a Scheduler configured 
with @rules.""" scheduler: Scheduler goal_map: Any def new_session( self, build_id, dynamic_ui: bool = False, ui_use_prodash: bool = False, use_colors=True, max_workunit_level: LogLevel = LogLevel.DEBUG, session_values: SessionValues | None = None, cancellation_latch: PySessionCancellationLatch | None = None, ) -> GraphSession: session = self.scheduler.new_session( build_id, dynamic_ui, ui_use_prodash, max_workunit_level=max_workunit_level, session_values=session_values, cancellation_latch=cancellation_latch, ) console = Console(use_colors=use_colors, session=session if dynamic_ui else None) return GraphSession(session, console, self.goal_map) @dataclass(frozen=True) class GraphSession: """A thin wrapper around a SchedulerSession configured with @rules.""" scheduler_session: SchedulerSession console: Console goal_map: Any # NB: Keep this in sync with the method `run_goal_rules`. goal_param_types: ClassVar[tuple[type, ...]] = (Specs, Console, Workspace) def goal_consumed_subsystem_scopes(self, goal_name: str) -> tuple[str, ...]: """Return the scopes of subsystems that could be consumed while running the given goal.""" goal_product = self.goal_map.get(goal_name) if not goal_product: return tuple() consumed_types = self.goal_consumed_types(goal_product) return tuple( sorted({typ.options_scope for typ in consumed_types if issubclass(typ, Subsystem)}) ) def goal_consumed_types(self, goal_product: type) -> set[type]: """Return the set of types that could possibly be consumed while running the given goal.""" return set( self.scheduler_session.scheduler.rule_graph_consumed_types( self.goal_param_types, goal_product ) ) def run_goal_rules( self, *, union_membership: UnionMembership, goals: Iterable[str], specs: Specs, poll: bool = False, poll_delay: float | None = None, ) -> int: """Runs @goal_rules sequentially and interactively by requesting their implicit Goal products. For retryable failures, raises scheduler.ExecutionError. :returns: An exit code. 
""" workspace = Workspace(self.scheduler_session) for goal in goals: goal_product = self.goal_map[goal] # NB: We no-op for goals that have no implementation because no relevant backends are # registered. We might want to reconsider the behavior to instead warn or error when # trying to run something like `./pants run` without any backends registered. if not goal_product.subsystem_cls.activated(union_membership): continue # NB: Keep this in sync with the property `goal_param_types`. params = Params(specs, self.console, workspace) logger.debug(f"requesting {goal_product} to satisfy execution of `{goal}` goal") try: exit_code = self.scheduler_session.run_goal_rule( goal_product, params, poll=poll, poll_delay=poll_delay ) finally: self.console.flush() if exit_code != PANTS_SUCCEEDED_EXIT_CODE: return exit_code return PANTS_SUCCEEDED_EXIT_CODE class EngineInitializer: """Constructs the components necessary to run the engine.""" class GoalMappingError(Exception): """Raised when a goal cannot be mapped to an @rule.""" @staticmethod def _make_goal_map_from_rules(rules): goal_map = {} for r in rules: output_type = getattr(r, "output_type", None) if not output_type or not issubclass(output_type, Goal): continue goal = r.output_type.name deprecated_goal = r.output_type.subsystem_cls.deprecated_options_scope for goal_name in [goal, deprecated_goal] if deprecated_goal else [goal]: if goal_name in goal_map: raise EngineInitializer.GoalMappingError( f"could not map goal `{goal_name}` to rule `{r}`: already claimed by product " f"`{goal_map[goal_name]}`" ) goal_map[goal_name] = r.output_type return goal_map @staticmethod def setup_graph( bootstrap_options: OptionValueContainer, build_configuration: BuildConfiguration, dynamic_remote_options: DynamicRemoteOptions, executor: PyExecutor | None = None, ) -> GraphScheduler: build_root = get_buildroot() executor = executor or GlobalOptions.create_py_executor(bootstrap_options) execution_options = 
ExecutionOptions.from_options(bootstrap_options, dynamic_remote_options) local_store_options = LocalStoreOptions.from_options(bootstrap_options) return EngineInitializer.setup_graph_extended( build_configuration, execution_options, executor=executor, pants_ignore_patterns=GlobalOptions.compute_pants_ignore(build_root, bootstrap_options), use_gitignore=bootstrap_options.pants_ignore_use_gitignore, local_store_options=local_store_options, local_execution_root_dir=bootstrap_options.local_execution_root_dir, named_caches_dir=bootstrap_options.named_caches_dir, ca_certs_path=bootstrap_options.ca_certs_path, build_root=build_root, include_trace_on_error=bootstrap_options.print_stacktrace, engine_visualize_to=bootstrap_options.engine_visualize_to, watch_filesystem=bootstrap_options.watch_filesystem, ) @staticmethod def setup_graph_extended( build_configuration: BuildConfiguration, execution_options: ExecutionOptions, *, executor: PyExecutor, pants_ignore_patterns: list[str], use_gitignore: bool, local_store_options: LocalStoreOptions, local_execution_root_dir: str, named_caches_dir: str, ca_certs_path: str | None = None, build_root: str | None = None, include_trace_on_error: bool = True, engine_visualize_to: str | None = None, watch_filesystem: bool = True, ) -> GraphScheduler: build_root_path = build_root or get_buildroot() rules = build_configuration.rules union_membership: UnionMembership registered_target_types = RegisteredTargetTypes.create(build_configuration.target_types) execution_options = execution_options or DEFAULT_EXECUTION_OPTIONS @rule def parser_singleton() -> Parser: return Parser( build_root=build_root_path, target_type_aliases=registered_target_types.aliases, object_aliases=build_configuration.registered_aliases, ) @rule def build_configuration_singleton() -> BuildConfiguration: return build_configuration @rule def registered_target_types_singleton() -> RegisteredTargetTypes: return registered_target_types @rule def union_membership_singleton() -> 
UnionMembership: return union_membership @rule def build_root_singleton() -> BuildRoot: return cast(BuildRoot, BuildRoot.instance) # Create a Scheduler containing graph and filesystem rules, with no installed goals. rules = FrozenOrderedSet( ( *collect_rules(locals()), *build_files.rules(), *fs.rules(), *environment.rules(), *desktop.rules(), *git_rules(), *graph.rules(), *options_parsing.rules(), *process.rules(), *system_binaries.rules(), *platform.rules(), *changed_rules(), *streaming_workunit_handler_rules(), *specs_calculator.rules(), *rules, ) ) goal_map = EngineInitializer._make_goal_map_from_rules(rules) union_membership = UnionMembership.from_rules( ( *build_configuration.union_rules, *(r for r in rules if isinstance(r, UnionRule)), ) ) rules = FrozenOrderedSet( ( *rules, # Install queries for each Goal. *( QueryRule(goal_type, GraphSession.goal_param_types) for goal_type in goal_map.values() ), # Install queries for each request/response pair used by the BSP support. # Note: These are necessary because the BSP support is a built-in goal that makes # synchronous requests into the engine. *( QueryRule(impl.response_type, (impl.request_type, Workspace)) for impl in union_membership.get(BSPHandlerMapping) ), QueryRule(Snapshot, [PathGlobs]), # Used by the SchedulerService. 
) ) def ensure_absolute_path(v: str) -> str: return Path(v).resolve().as_posix() def ensure_optional_absolute_path(v: str | None) -> str | None: if v is None: return None return ensure_absolute_path(v) scheduler = Scheduler( ignore_patterns=pants_ignore_patterns, use_gitignore=use_gitignore, build_root=build_root_path, local_execution_root_dir=ensure_absolute_path(local_execution_root_dir), named_caches_dir=ensure_absolute_path(named_caches_dir), ca_certs_path=ensure_optional_absolute_path(ca_certs_path), rules=rules, union_membership=union_membership, executor=executor, execution_options=execution_options, local_store_options=local_store_options, include_trace_on_error=include_trace_on_error, visualize_to_dir=engine_visualize_to, watch_filesystem=watch_filesystem, ) return GraphScheduler(scheduler, goal_map)
src/python/pants/init/engine_initializer.py
from __future__ import annotations import logging from dataclasses import dataclass from pathlib import Path from typing import Any, ClassVar, Iterable, cast from pants.base.build_environment import get_buildroot from pants.base.build_root import BuildRoot from pants.base.exiter import PANTS_SUCCEEDED_EXIT_CODE from pants.base.specs import Specs from pants.bsp.protocol import BSPHandlerMapping from pants.build_graph.build_configuration import BuildConfiguration from pants.core.util_rules import system_binaries from pants.engine import desktop, environment, fs, platform, process from pants.engine.console import Console from pants.engine.fs import PathGlobs, Snapshot, Workspace from pants.engine.goal import Goal from pants.engine.internals import build_files, graph, options_parsing from pants.engine.internals.native_engine import PyExecutor, PySessionCancellationLatch from pants.engine.internals.parser import Parser from pants.engine.internals.scheduler import Scheduler, SchedulerSession from pants.engine.internals.selectors import Params from pants.engine.internals.session import SessionValues from pants.engine.rules import QueryRule, collect_rules, rule from pants.engine.streaming_workunit_handler import rules as streaming_workunit_handler_rules from pants.engine.target import RegisteredTargetTypes from pants.engine.unions import UnionMembership, UnionRule from pants.init import specs_calculator from pants.option.global_options import ( DEFAULT_EXECUTION_OPTIONS, DynamicRemoteOptions, ExecutionOptions, GlobalOptions, LocalStoreOptions, ) from pants.option.option_value_container import OptionValueContainer from pants.option.subsystem import Subsystem from pants.util.logging import LogLevel from pants.util.ordered_set import FrozenOrderedSet from pants.vcs.changed import rules as changed_rules from pants.vcs.git import rules as git_rules logger = logging.getLogger(__name__) @dataclass(frozen=True) class GraphScheduler: """A thin wrapper around a Scheduler configured 
with @rules.""" scheduler: Scheduler goal_map: Any def new_session( self, build_id, dynamic_ui: bool = False, ui_use_prodash: bool = False, use_colors=True, max_workunit_level: LogLevel = LogLevel.DEBUG, session_values: SessionValues | None = None, cancellation_latch: PySessionCancellationLatch | None = None, ) -> GraphSession: session = self.scheduler.new_session( build_id, dynamic_ui, ui_use_prodash, max_workunit_level=max_workunit_level, session_values=session_values, cancellation_latch=cancellation_latch, ) console = Console(use_colors=use_colors, session=session if dynamic_ui else None) return GraphSession(session, console, self.goal_map) @dataclass(frozen=True) class GraphSession: """A thin wrapper around a SchedulerSession configured with @rules.""" scheduler_session: SchedulerSession console: Console goal_map: Any # NB: Keep this in sync with the method `run_goal_rules`. goal_param_types: ClassVar[tuple[type, ...]] = (Specs, Console, Workspace) def goal_consumed_subsystem_scopes(self, goal_name: str) -> tuple[str, ...]: """Return the scopes of subsystems that could be consumed while running the given goal.""" goal_product = self.goal_map.get(goal_name) if not goal_product: return tuple() consumed_types = self.goal_consumed_types(goal_product) return tuple( sorted({typ.options_scope for typ in consumed_types if issubclass(typ, Subsystem)}) ) def goal_consumed_types(self, goal_product: type) -> set[type]: """Return the set of types that could possibly be consumed while running the given goal.""" return set( self.scheduler_session.scheduler.rule_graph_consumed_types( self.goal_param_types, goal_product ) ) def run_goal_rules( self, *, union_membership: UnionMembership, goals: Iterable[str], specs: Specs, poll: bool = False, poll_delay: float | None = None, ) -> int: """Runs @goal_rules sequentially and interactively by requesting their implicit Goal products. For retryable failures, raises scheduler.ExecutionError. :returns: An exit code. 
""" workspace = Workspace(self.scheduler_session) for goal in goals: goal_product = self.goal_map[goal] # NB: We no-op for goals that have no implementation because no relevant backends are # registered. We might want to reconsider the behavior to instead warn or error when # trying to run something like `./pants run` without any backends registered. if not goal_product.subsystem_cls.activated(union_membership): continue # NB: Keep this in sync with the property `goal_param_types`. params = Params(specs, self.console, workspace) logger.debug(f"requesting {goal_product} to satisfy execution of `{goal}` goal") try: exit_code = self.scheduler_session.run_goal_rule( goal_product, params, poll=poll, poll_delay=poll_delay ) finally: self.console.flush() if exit_code != PANTS_SUCCEEDED_EXIT_CODE: return exit_code return PANTS_SUCCEEDED_EXIT_CODE class EngineInitializer: """Constructs the components necessary to run the engine.""" class GoalMappingError(Exception): """Raised when a goal cannot be mapped to an @rule.""" @staticmethod def _make_goal_map_from_rules(rules): goal_map = {} for r in rules: output_type = getattr(r, "output_type", None) if not output_type or not issubclass(output_type, Goal): continue goal = r.output_type.name deprecated_goal = r.output_type.subsystem_cls.deprecated_options_scope for goal_name in [goal, deprecated_goal] if deprecated_goal else [goal]: if goal_name in goal_map: raise EngineInitializer.GoalMappingError( f"could not map goal `{goal_name}` to rule `{r}`: already claimed by product " f"`{goal_map[goal_name]}`" ) goal_map[goal_name] = r.output_type return goal_map @staticmethod def setup_graph( bootstrap_options: OptionValueContainer, build_configuration: BuildConfiguration, dynamic_remote_options: DynamicRemoteOptions, executor: PyExecutor | None = None, ) -> GraphScheduler: build_root = get_buildroot() executor = executor or GlobalOptions.create_py_executor(bootstrap_options) execution_options = 
ExecutionOptions.from_options(bootstrap_options, dynamic_remote_options) local_store_options = LocalStoreOptions.from_options(bootstrap_options) return EngineInitializer.setup_graph_extended( build_configuration, execution_options, executor=executor, pants_ignore_patterns=GlobalOptions.compute_pants_ignore(build_root, bootstrap_options), use_gitignore=bootstrap_options.pants_ignore_use_gitignore, local_store_options=local_store_options, local_execution_root_dir=bootstrap_options.local_execution_root_dir, named_caches_dir=bootstrap_options.named_caches_dir, ca_certs_path=bootstrap_options.ca_certs_path, build_root=build_root, include_trace_on_error=bootstrap_options.print_stacktrace, engine_visualize_to=bootstrap_options.engine_visualize_to, watch_filesystem=bootstrap_options.watch_filesystem, ) @staticmethod def setup_graph_extended( build_configuration: BuildConfiguration, execution_options: ExecutionOptions, *, executor: PyExecutor, pants_ignore_patterns: list[str], use_gitignore: bool, local_store_options: LocalStoreOptions, local_execution_root_dir: str, named_caches_dir: str, ca_certs_path: str | None = None, build_root: str | None = None, include_trace_on_error: bool = True, engine_visualize_to: str | None = None, watch_filesystem: bool = True, ) -> GraphScheduler: build_root_path = build_root or get_buildroot() rules = build_configuration.rules union_membership: UnionMembership registered_target_types = RegisteredTargetTypes.create(build_configuration.target_types) execution_options = execution_options or DEFAULT_EXECUTION_OPTIONS @rule def parser_singleton() -> Parser: return Parser( build_root=build_root_path, target_type_aliases=registered_target_types.aliases, object_aliases=build_configuration.registered_aliases, ) @rule def build_configuration_singleton() -> BuildConfiguration: return build_configuration @rule def registered_target_types_singleton() -> RegisteredTargetTypes: return registered_target_types @rule def union_membership_singleton() -> 
UnionMembership: return union_membership @rule def build_root_singleton() -> BuildRoot: return cast(BuildRoot, BuildRoot.instance) # Create a Scheduler containing graph and filesystem rules, with no installed goals. rules = FrozenOrderedSet( ( *collect_rules(locals()), *build_files.rules(), *fs.rules(), *environment.rules(), *desktop.rules(), *git_rules(), *graph.rules(), *options_parsing.rules(), *process.rules(), *system_binaries.rules(), *platform.rules(), *changed_rules(), *streaming_workunit_handler_rules(), *specs_calculator.rules(), *rules, ) ) goal_map = EngineInitializer._make_goal_map_from_rules(rules) union_membership = UnionMembership.from_rules( ( *build_configuration.union_rules, *(r for r in rules if isinstance(r, UnionRule)), ) ) rules = FrozenOrderedSet( ( *rules, # Install queries for each Goal. *( QueryRule(goal_type, GraphSession.goal_param_types) for goal_type in goal_map.values() ), # Install queries for each request/response pair used by the BSP support. # Note: These are necessary because the BSP support is a built-in goal that makes # synchronous requests into the engine. *( QueryRule(impl.response_type, (impl.request_type, Workspace)) for impl in union_membership.get(BSPHandlerMapping) ), QueryRule(Snapshot, [PathGlobs]), # Used by the SchedulerService. 
) ) def ensure_absolute_path(v: str) -> str: return Path(v).resolve().as_posix() def ensure_optional_absolute_path(v: str | None) -> str | None: if v is None: return None return ensure_absolute_path(v) scheduler = Scheduler( ignore_patterns=pants_ignore_patterns, use_gitignore=use_gitignore, build_root=build_root_path, local_execution_root_dir=ensure_absolute_path(local_execution_root_dir), named_caches_dir=ensure_absolute_path(named_caches_dir), ca_certs_path=ensure_optional_absolute_path(ca_certs_path), rules=rules, union_membership=union_membership, executor=executor, execution_options=execution_options, local_store_options=local_store_options, include_trace_on_error=include_trace_on_error, visualize_to_dir=engine_visualize_to, watch_filesystem=watch_filesystem, ) return GraphScheduler(scheduler, goal_map)
0.864825
0.119024
import configparser import os import re import sys from contextlib import closing, contextmanager import yaml from . import base, utils SubstituteRegex = re.compile(r"\$\{(?P<var>(\w|:)+)\}") def load_config_arguments(args): parser = utils.default_parser("config") parser.add_argument("option", nargs="?", help="Show only specific information") return parser.parse_known_args(args) # pylint: disable=too-many-public-methods class Environment: """ Bootstrap environment """ def __init__(self, cfg): utils.info("Loading configuration file") self._config = {} self._load_config(cfg) self._load_config("odoo.versions.yaml", False) self._post_process_config() def _substitute(self, match, sub=True): """ Replaces the matched parts with the variable """ var = match.groupdict().get("var", "").split(":") if not all(var): raise SyntaxError() result = self.get(*var) return str(result) if sub else result def _substitute_string(self, line): """ Substitute variables in strings """ match = SubstituteRegex.fullmatch(line) if match: return self._substitute(match, False) return SubstituteRegex.sub(self._substitute, line) def _substitute_dict(self, data): """ Substitute variables in dictionaries """ tmp = {} for sec, section in data.items(): if isinstance(section, str): tmp[sec] = self._substitute_string(section) elif isinstance(section, list): tmp[sec] = self._substitute_list(section) elif isinstance(section, dict): tmp[sec] = self._substitute_dict(section) else: tmp[sec] = section return tmp def _substitute_list(self, ls): """ Substitute variables in lists """ tmp = [] for x in ls: if isinstance(x, dict): tmp.append(self._substitute_dict(x)) elif isinstance(x, str): tmp.append(self._substitute_string(x)) elif isinstance(x, list): tmp.append(self._substitute_list(x)) else: tmp.append(x) return tmp def _post_process_config(self): """ Post process the configuration by replacing variables """ # Include environment variables first for later substitutions for env, keys in 
base.ENVIRONMENT.items(): if os.environ.get(env): self.set(*keys, value=os.environ[env]) options = self.get("odoo", "options", default={}) for key, value in options.items(): options[key] = os.environ.get(f"ODOO_{key.upper()}") or value # Run the substitution on the configuration self._config = self._substitute_dict(self._config) # Combine the addon paths current = set(self.get("odoo", "addons_path", default=[])) current.update( { section.get("addon_path", sec) for sec, section in self.get("repos", default={}).items() } ) # Generate the addon paths current = set(map(os.path.abspath, current)) self.set("odoo", "options", "addons_path", value=current) def get(self, *key, default=None): """ Get a specific value of the configuration """ data = self._config try: for k in key: data = data[k] if data is None: return default return data except KeyError: return default def opt(self, *key, default=None): """ Short cut to directly access odoo options """ return self.get("odoo", "options", *key, default=default) def set(self, *key, value=None): """ Set a specific value of the configuration """ data = self._config for k in key[:-1]: data = data[k] data[key[-1]] = value def _load_config(self, cfg, raise_if_missing=True): """ Load and process a configuration file """ if not os.path.isfile(cfg) and not raise_if_missing: utils.warn(f" * {cfg}") return utils.info(f" * {cfg}") with open(cfg) as fp: options = yaml.load(fp, Loader=yaml.FullLoader) # Load all base configuration files first extend = options.get(base.SECTION, {}).get("extend") if isinstance(extend, str): self._load_config(extend) elif isinstance(extend, list): for e in extend: self._load_config(e) elif extend is not None: raise TypeError(f"{base.SECTION}:extend must be str or list") # Merge the configurations self._config = utils.merge(self._config, options, replace=["merges"]) def _init_odoo(self): """ Initialize Odoo to enable the module import """ path = self.get(base.SECTION, "odoo") if not path: utils.error(f"No 
{base.SECTION}:odoo defined") return False path = os.path.abspath(path) if not os.path.isdir(path): utils.error("Missing odoo folder") return False if path not in sys.path: sys.path.append(path) return path @contextmanager def env(self, db_name, rollback=False): """ Create an environment from a registry """ # pylint: disable=C0415,E0401 import odoo # Get all installed modules reg = odoo.registry(db_name) with closing(reg.cursor()) as cr: yield odoo.api.Environment(cr, odoo.SUPERUSER_ID, {}) if rollback: cr.rollback() else: cr.commit() @contextmanager def _manage(self): """Wrap the manage to resolve version differrences""" import odoo import odoo.release if odoo.release.version_info >= (15,): yield else: with odoo.api.Environment.manage(): yield def generate_config(self): """ Generate the Odoo configuration file """ utils.info("Generating configuration file") cp = configparser.ConfigParser() # Generate the configuration with the sections options = self.get("odoo", "options", default={}) for key, value in sorted(options.items()): if key == "load_language": continue if "." in key: sec, key = key.split(".", 1) else: sec = "options" if not cp.has_section(sec): cp.add_section(sec) if isinstance(value, (set, list)): cp.set(sec, key, ",".join(map(str, value))) elif value is None: cp.set(sec, key, "") else: cp.set(sec, key, str(value)) os.makedirs(os.path.dirname(base.ODOO_CONFIG), exist_ok=True) # Write the configuration with open(base.ODOO_CONFIG, "w+") as fp: cp.write(fp) def config(self, args=None): """ Simply output the rendered configuration file """ args, _ = load_config_arguments(args or []) if args.option: return yaml.dump(self.get(*args.option.split(":"))) return yaml.dump(self._config)
src/doblib/env.py
import configparser import os import re import sys from contextlib import closing, contextmanager import yaml from . import base, utils SubstituteRegex = re.compile(r"\$\{(?P<var>(\w|:)+)\}") def load_config_arguments(args): parser = utils.default_parser("config") parser.add_argument("option", nargs="?", help="Show only specific information") return parser.parse_known_args(args) # pylint: disable=too-many-public-methods class Environment: """ Bootstrap environment """ def __init__(self, cfg): utils.info("Loading configuration file") self._config = {} self._load_config(cfg) self._load_config("odoo.versions.yaml", False) self._post_process_config() def _substitute(self, match, sub=True): """ Replaces the matched parts with the variable """ var = match.groupdict().get("var", "").split(":") if not all(var): raise SyntaxError() result = self.get(*var) return str(result) if sub else result def _substitute_string(self, line): """ Substitute variables in strings """ match = SubstituteRegex.fullmatch(line) if match: return self._substitute(match, False) return SubstituteRegex.sub(self._substitute, line) def _substitute_dict(self, data): """ Substitute variables in dictionaries """ tmp = {} for sec, section in data.items(): if isinstance(section, str): tmp[sec] = self._substitute_string(section) elif isinstance(section, list): tmp[sec] = self._substitute_list(section) elif isinstance(section, dict): tmp[sec] = self._substitute_dict(section) else: tmp[sec] = section return tmp def _substitute_list(self, ls): """ Substitute variables in lists """ tmp = [] for x in ls: if isinstance(x, dict): tmp.append(self._substitute_dict(x)) elif isinstance(x, str): tmp.append(self._substitute_string(x)) elif isinstance(x, list): tmp.append(self._substitute_list(x)) else: tmp.append(x) return tmp def _post_process_config(self): """ Post process the configuration by replacing variables """ # Include environment variables first for later substitutions for env, keys in 
base.ENVIRONMENT.items(): if os.environ.get(env): self.set(*keys, value=os.environ[env]) options = self.get("odoo", "options", default={}) for key, value in options.items(): options[key] = os.environ.get(f"ODOO_{key.upper()}") or value # Run the substitution on the configuration self._config = self._substitute_dict(self._config) # Combine the addon paths current = set(self.get("odoo", "addons_path", default=[])) current.update( { section.get("addon_path", sec) for sec, section in self.get("repos", default={}).items() } ) # Generate the addon paths current = set(map(os.path.abspath, current)) self.set("odoo", "options", "addons_path", value=current) def get(self, *key, default=None): """ Get a specific value of the configuration """ data = self._config try: for k in key: data = data[k] if data is None: return default return data except KeyError: return default def opt(self, *key, default=None): """ Short cut to directly access odoo options """ return self.get("odoo", "options", *key, default=default) def set(self, *key, value=None): """ Set a specific value of the configuration """ data = self._config for k in key[:-1]: data = data[k] data[key[-1]] = value def _load_config(self, cfg, raise_if_missing=True): """ Load and process a configuration file """ if not os.path.isfile(cfg) and not raise_if_missing: utils.warn(f" * {cfg}") return utils.info(f" * {cfg}") with open(cfg) as fp: options = yaml.load(fp, Loader=yaml.FullLoader) # Load all base configuration files first extend = options.get(base.SECTION, {}).get("extend") if isinstance(extend, str): self._load_config(extend) elif isinstance(extend, list): for e in extend: self._load_config(e) elif extend is not None: raise TypeError(f"{base.SECTION}:extend must be str or list") # Merge the configurations self._config = utils.merge(self._config, options, replace=["merges"]) def _init_odoo(self): """ Initialize Odoo to enable the module import """ path = self.get(base.SECTION, "odoo") if not path: utils.error(f"No 
{base.SECTION}:odoo defined") return False path = os.path.abspath(path) if not os.path.isdir(path): utils.error("Missing odoo folder") return False if path not in sys.path: sys.path.append(path) return path @contextmanager def env(self, db_name, rollback=False): """ Create an environment from a registry """ # pylint: disable=C0415,E0401 import odoo # Get all installed modules reg = odoo.registry(db_name) with closing(reg.cursor()) as cr: yield odoo.api.Environment(cr, odoo.SUPERUSER_ID, {}) if rollback: cr.rollback() else: cr.commit() @contextmanager def _manage(self): """Wrap the manage to resolve version differrences""" import odoo import odoo.release if odoo.release.version_info >= (15,): yield else: with odoo.api.Environment.manage(): yield def generate_config(self): """ Generate the Odoo configuration file """ utils.info("Generating configuration file") cp = configparser.ConfigParser() # Generate the configuration with the sections options = self.get("odoo", "options", default={}) for key, value in sorted(options.items()): if key == "load_language": continue if "." in key: sec, key = key.split(".", 1) else: sec = "options" if not cp.has_section(sec): cp.add_section(sec) if isinstance(value, (set, list)): cp.set(sec, key, ",".join(map(str, value))) elif value is None: cp.set(sec, key, "") else: cp.set(sec, key, str(value)) os.makedirs(os.path.dirname(base.ODOO_CONFIG), exist_ok=True) # Write the configuration with open(base.ODOO_CONFIG, "w+") as fp: cp.write(fp) def config(self, args=None): """ Simply output the rendered configuration file """ args, _ = load_config_arguments(args or []) if args.option: return yaml.dump(self.get(*args.option.split(":"))) return yaml.dump(self._config)
0.470737
0.104706
from mathics.builtin.base import Builtin from mathics.builtin.assignments.internals import get_symbol_values from mathics.core.attributes import hold_all, protected class DefaultValues(Builtin): """ <dl> <dt>'DefaultValues[$symbol$]' <dd>gives the list of default values associated with $symbol$. <i>Note: this function is in Mathematica 5 but has been removed from current Mathematica.</i> </dl> >> Default[f, 1] = 4 = 4 >> DefaultValues[f] = {HoldPattern[Default[f, 1]] :> 4} You can assign values to 'DefaultValues': >> DefaultValues[g] = {Default[g] -> 3}; >> Default[g, 1] = 3 >> g[x_.] := {x} >> g[a] = {a} >> g[] = {3} """ attributes = hold_all | protected summary_text = ( "gives default values for the arguments associated with a function symbol" ) def apply(self, symbol, evaluation): "DefaultValues[symbol_]" return get_symbol_values(symbol, "System`DefaultValues", "default", evaluation) class Messages(Builtin): """ <dl> <dt>'Messages[$symbol$]' <dd>gives the list of messages associated with $symbol$. </dl> >> a::b = "foo" = foo >> Messages[a] = {HoldPattern[a::b] :> foo} >> Messages[a] = {a::c :> "bar"}; >> a::c // InputForm = "bar" >> Message[a::c] : bar """ attributes = hold_all | protected summary_text = "gives the list the messages associated with a particular symbol" def apply(self, symbol, evaluation): "Messages[symbol_]" return get_symbol_values(symbol, "Messages", "messages", evaluation) class NValues(Builtin): """ <dl> <dt>'NValues[$symbol$]' <dd>gives the list of numerical values associated with $symbol$. <i>Note: this function is in Mathematica 5 but has been removed from current Mathematica.</i> </dl> >> NValues[a] = {} >> N[a] = 3; >> NValues[a] = {HoldPattern[N[a, MachinePrecision]] :> 3} You can assign values to 'NValues': >> NValues[b] := {N[b, MachinePrecision] :> 2} >> N[b] = 2. Be sure to use 'SetDelayed', otherwise the left-hand side of the transformation rule will be evaluated immediately, causing the head of 'N' to get lost. 
Furthermore, you have to include the precision in the rules; 'MachinePrecision' will not be inserted automatically: >> NValues[c] := {N[c] :> 3} >> N[c] = c Mathics will gracefully assign any list of rules to 'NValues'; however, inappropriate rules will never be used: >> NValues[d] = {foo -> bar}; >> NValues[d] = {HoldPattern[foo] :> bar} >> N[d] = d """ attributes = hold_all | protected summary_text = "gives the list of numerical values associated with a symbol" def apply(self, symbol, evaluation): "NValues[symbol_]" return get_symbol_values(symbol, "NValues", "n", evaluation) class SubValues(Builtin): """ <dl> <dt>'SubValues[$symbol$]' <dd>gives the list of subvalues associated with $symbol$. <i>Note: this function is not in current Mathematica.</i> </dl> >> f[1][x_] := x >> f[2][x_] := x ^ 2 >> SubValues[f] = {HoldPattern[f[2][x_]] :> x ^ 2, HoldPattern[f[1][x_]] :> x} >> Definition[f] = f[2][x_] = x ^ 2 . . f[1][x_] = x """ attributes = hold_all | protected summary_text = "gives the list of subvalues associated with a symbol" def apply(self, symbol, evaluation): "SubValues[symbol_]" return get_symbol_values(symbol, "SubValues", "sub", evaluation)
mathics/builtin/assignments/types.py
from mathics.builtin.base import Builtin from mathics.builtin.assignments.internals import get_symbol_values from mathics.core.attributes import hold_all, protected class DefaultValues(Builtin): """ <dl> <dt>'DefaultValues[$symbol$]' <dd>gives the list of default values associated with $symbol$. <i>Note: this function is in Mathematica 5 but has been removed from current Mathematica.</i> </dl> >> Default[f, 1] = 4 = 4 >> DefaultValues[f] = {HoldPattern[Default[f, 1]] :> 4} You can assign values to 'DefaultValues': >> DefaultValues[g] = {Default[g] -> 3}; >> Default[g, 1] = 3 >> g[x_.] := {x} >> g[a] = {a} >> g[] = {3} """ attributes = hold_all | protected summary_text = ( "gives default values for the arguments associated with a function symbol" ) def apply(self, symbol, evaluation): "DefaultValues[symbol_]" return get_symbol_values(symbol, "System`DefaultValues", "default", evaluation) class Messages(Builtin): """ <dl> <dt>'Messages[$symbol$]' <dd>gives the list of messages associated with $symbol$. </dl> >> a::b = "foo" = foo >> Messages[a] = {HoldPattern[a::b] :> foo} >> Messages[a] = {a::c :> "bar"}; >> a::c // InputForm = "bar" >> Message[a::c] : bar """ attributes = hold_all | protected summary_text = "gives the list the messages associated with a particular symbol" def apply(self, symbol, evaluation): "Messages[symbol_]" return get_symbol_values(symbol, "Messages", "messages", evaluation) class NValues(Builtin): """ <dl> <dt>'NValues[$symbol$]' <dd>gives the list of numerical values associated with $symbol$. <i>Note: this function is in Mathematica 5 but has been removed from current Mathematica.</i> </dl> >> NValues[a] = {} >> N[a] = 3; >> NValues[a] = {HoldPattern[N[a, MachinePrecision]] :> 3} You can assign values to 'NValues': >> NValues[b] := {N[b, MachinePrecision] :> 2} >> N[b] = 2. Be sure to use 'SetDelayed', otherwise the left-hand side of the transformation rule will be evaluated immediately, causing the head of 'N' to get lost. 
Furthermore, you have to include the precision in the rules; 'MachinePrecision' will not be inserted automatically: >> NValues[c] := {N[c] :> 3} >> N[c] = c Mathics will gracefully assign any list of rules to 'NValues'; however, inappropriate rules will never be used: >> NValues[d] = {foo -> bar}; >> NValues[d] = {HoldPattern[foo] :> bar} >> N[d] = d """ attributes = hold_all | protected summary_text = "gives the list of numerical values associated with a symbol" def apply(self, symbol, evaluation): "NValues[symbol_]" return get_symbol_values(symbol, "NValues", "n", evaluation) class SubValues(Builtin): """ <dl> <dt>'SubValues[$symbol$]' <dd>gives the list of subvalues associated with $symbol$. <i>Note: this function is not in current Mathematica.</i> </dl> >> f[1][x_] := x >> f[2][x_] := x ^ 2 >> SubValues[f] = {HoldPattern[f[2][x_]] :> x ^ 2, HoldPattern[f[1][x_]] :> x} >> Definition[f] = f[2][x_] = x ^ 2 . . f[1][x_] = x """ attributes = hold_all | protected summary_text = "gives the list of subvalues associated with a symbol" def apply(self, symbol, evaluation): "SubValues[symbol_]" return get_symbol_values(symbol, "SubValues", "sub", evaluation)
0.803328
0.425904
def to_molsysmt_DataFrame(item, trajectory_item=None, atom_indices='all', structure_indices='all'): return item.dataframe def from_molsysmt_DataFrame(item, trajectory_item=None, atom_indices='all', structure_indices='all'): from molsysmt.native.topology import Topology from molsysmt.native import elements from numpy import arange atoms = [] groups = [] components = [] chains = [] entities = [] molecules = [] n_atoms = item.shape[0] # atoms, groups, chains, entities, molecules atom_index_array = item["atom.index"].to_numpy() atom_name_array = item["atom.name"].to_numpy() atom_id_array = item["atom.id"].to_numpy() atom_type_array = item["atom.type"].to_numpy() atom_formal_charge_array = item["atom.formal_charge"].to_numpy() group_index_array = item["group.index"].to_numpy() group_name_array = item["group.name"].to_numpy() group_id_array = item["group.id"].to_numpy() group_type_array = item["group.type"].to_numpy() component_index_array = item["component.index"].to_numpy() component_name_array = item["component.name"].to_numpy() component_id_array = item["component.id"].to_numpy() component_type_array = item["component.type"].to_numpy() chain_index_array = item["chain.index"].to_numpy() chain_name_array = item["chain.name"].to_numpy() chain_id_array = item["chain.id"].to_numpy() chain_type_array = item["chain.type"].to_numpy() entity_index_array = item["entity.index"].to_numpy() entity_name_array = item["entity.name"].to_numpy() entity_id_array = item["entity.id"].to_numpy() entity_type_array = item["entity.type"].to_numpy() molecule_index_array = item["molecule.index"].to_numpy() molecule_name_array = item["molecule.name"].to_numpy() molecule_id_array = item["molecule.id"].to_numpy() molecule_type_array = item["molecule.type"].to_numpy() former_group_index = -1 former_component_index = -1 former_chain_index = -1 former_entity_index = -1 former_molecule_index = -1 iterator = zip(atom_index_array, atom_name_array, atom_id_array, atom_type_array, 
atom_formal_charge_array, group_index_array, group_name_array, group_id_array, group_type_array, component_index_array, component_name_array, component_id_array, component_type_array, chain_index_array, chain_name_array, chain_id_array, chain_type_array, entity_index_array, entity_name_array, entity_id_array, entity_type_array, molecule_index_array, molecule_name_array, molecule_id_array, molecule_type_array) for atom_index, atom_name, atom_id, atom_type, atom_formal_charge, group_index, group_name, group_id, group_type,\ component_index, component_name, component_id, component_type, chain_index, chain_name,\ chain_id, chain_type, entity_index, entity_name, entity_id, entity_type, molecule_index,\ molecule_name, molecule_id, molecule_type in iterator: new_group = (former_group_index!=group_index) new_component = (former_component_index!=component_index) new_molecule = (former_molecule_index!=molecule_index) new_chain = (former_chain_index!=chain_index) new_entity = (former_entity_index!=entity_index) atom = elements.make_atom(index=atom_index, id=atom_id, name=atom_name, type=atom_type) atom.formal_charge = atom_formal_charge atoms.append(atom) if new_group: group = elements.make_group(index=group_index, id=group_id, name=group_name, type=group_type) groups.append(group) former_group_index = group_index if new_component: component = elements.make_component(index=component_index, id=component_id, name=component_name, type=component_type) components.append(component) former_component_index = component_index if new_molecule: molecule = elements.make_molecule(index=molecule_index, id=molecule_id, name=molecule_name, type=molecule_type) molecules.append(molecule) former_molecule_index = molecule_index if new_chain: chain = elements.make_chain(index=chain_index, id=chain_id, name=chain_name, type=chain_type) chains.append(chain) former_chain_index = chain_index if new_entity: entity = elements.make_entity(index=entity_index, id=entity_id, name=entity_name, 
type=entity_type) entities.append(entity) former_entity_index = entity_index atom.group = group group.atom.append(atom) group.atom_indices.append(atom_index) group.n_atoms+=1 atom.component = component component.atom.append(atom) component.atom_indices.append(atom_index) component.n_atoms+=1 atom.chain = chain chain.atom.append(atom) chain.atom_indices.append(atom_index) chain.n_atoms+=1 atom.molecule = molecule molecule.atom.append(atom) molecule.atom_indices.append(atom_index) molecule.n_atoms+=1 atom.entity = entity entity.atom.append(atom) entity.atom_indices.append(atom_index) entity.n_atoms+=1 if new_group: group.component = component component.group.append(group) component.group_indices.append(group_index) component.n_groups+=1 group.chain = chain chain.group.append(group) chain.group_indices.append(group_index) chain.n_groups+=1 group.molecule = molecule molecule.group.append(group) molecule.group_indices.append(group_index) molecule.n_groups+=1 group.entity = entity entity.group.append(group) entity.group_indices.append(group_index) entity.n_groups+=1 if new_component: component.chain = chain chain.component.append(component) chain.component_indices.append(component_index) chain.n_components+=1 component.molecule = molecule molecule.component.append(component) molecule.component_indices.append(component_index) molecule.n_components+=1 component.entity = entity entity.component.append(component) entity.component_indices.append(component_index) entity.n_components+=1 if new_molecule: molecule.entity = entity entity.molecule.append(molecule) entity.molecule_indices.append(molecule_index) entity.n_molecules+=1 if new_chain: chain.entity = entity entity.chain.append(chain) entity.chain_indices.append(chain_index) entity.n_chains+=1 del(atom_name_array, atom_id_array, atom_type_array) del(group_index_array, group_name_array, group_id_array, group_type_array) del(component_index_array, component_name_array, component_id_array, component_type_array) 
del(molecule_index_array, molecule_name_array, molecule_id_array, molecule_type_array) del(chain_index_array, chain_name_array, chain_id_array, chain_type_array) del(entity_index_array, entity_name_array, entity_id_array, entity_type_array) tmp_item = Topology() tmp_item.entity = entities tmp_item.n_entities = len(entities) tmp_item.entity_indices = arange(tmp_item.n_entities) tmp_item.molecule = molecules tmp_item.n_molecules = len(molecules) tmp_item.molecule_indices = arange(tmp_item.n_molecules) tmp_item.chain = chains tmp_item.n_chains = len(chains) tmp_item.chain_indices = arange(tmp_item.n_chains) tmp_item.component = components tmp_item.n_components = len(components) tmp_item.component_indices = arange(tmp_item.n_components) tmp_item.group = groups tmp_item.n_groups = len(groups) tmp_item.group_indices = arange(tmp_item.n_groups) tmp_item.atom = atoms tmp_item.n_atoms = len(atoms) tmp_item.atom_indices = arange(tmp_item.n_atoms) del(atoms, groups, components, molecules, chains, entities) # bonds atom_bonded_atom_indices_array = item["atom.bonded_atom_indices"].to_numpy() bond_index=0 bonds = [] bonded_atom_indices_list=[] for atom_index_0, bonded_atom_indices in zip(atom_index_array, atom_bonded_atom_indices_array): atom_0 = tmp_item.atom[atom_index_0] for atom_index_1 in bonded_atom_indices: if atom_index_0 < atom_index_1: atom_1 = tmp_item.atom[atom_index_1] bond = elements.make_bond(index=bond_index, atoms=[atom_0,atom_1]) atom_0.add_bond(bond) atom_1.add_bond(bond) bonds.append(bond) bonded_atom_indices_list.append([atom_index_0,atom_index_1]) bond_index+=1 tmp_item.bond = bonds tmp_item.n_bonds = len(bonds) tmp_item.bond_indices = arange(tmp_item.n_bonds) tmp_item.bonded_atom_indices = bonded_atom_indices_list del(atom_index_array, atom_bonded_atom_indices_array, bonded_atom_indices) del(bonds) tmp_item.dataframe = item.copy() return tmp_item def to_molsysmt_DataFrame(item, indices='all', structure_indices='all'): tmp_item = item.dataframe.copy() 
return tmp_item
molsysmt/native/old/former_topology/io/topology/classes/molsysmt_DataFrame.py
def to_molsysmt_DataFrame(item, trajectory_item=None, atom_indices='all', structure_indices='all'): return item.dataframe def from_molsysmt_DataFrame(item, trajectory_item=None, atom_indices='all', structure_indices='all'): from molsysmt.native.topology import Topology from molsysmt.native import elements from numpy import arange atoms = [] groups = [] components = [] chains = [] entities = [] molecules = [] n_atoms = item.shape[0] # atoms, groups, chains, entities, molecules atom_index_array = item["atom.index"].to_numpy() atom_name_array = item["atom.name"].to_numpy() atom_id_array = item["atom.id"].to_numpy() atom_type_array = item["atom.type"].to_numpy() atom_formal_charge_array = item["atom.formal_charge"].to_numpy() group_index_array = item["group.index"].to_numpy() group_name_array = item["group.name"].to_numpy() group_id_array = item["group.id"].to_numpy() group_type_array = item["group.type"].to_numpy() component_index_array = item["component.index"].to_numpy() component_name_array = item["component.name"].to_numpy() component_id_array = item["component.id"].to_numpy() component_type_array = item["component.type"].to_numpy() chain_index_array = item["chain.index"].to_numpy() chain_name_array = item["chain.name"].to_numpy() chain_id_array = item["chain.id"].to_numpy() chain_type_array = item["chain.type"].to_numpy() entity_index_array = item["entity.index"].to_numpy() entity_name_array = item["entity.name"].to_numpy() entity_id_array = item["entity.id"].to_numpy() entity_type_array = item["entity.type"].to_numpy() molecule_index_array = item["molecule.index"].to_numpy() molecule_name_array = item["molecule.name"].to_numpy() molecule_id_array = item["molecule.id"].to_numpy() molecule_type_array = item["molecule.type"].to_numpy() former_group_index = -1 former_component_index = -1 former_chain_index = -1 former_entity_index = -1 former_molecule_index = -1 iterator = zip(atom_index_array, atom_name_array, atom_id_array, atom_type_array, 
atom_formal_charge_array, group_index_array, group_name_array, group_id_array, group_type_array, component_index_array, component_name_array, component_id_array, component_type_array, chain_index_array, chain_name_array, chain_id_array, chain_type_array, entity_index_array, entity_name_array, entity_id_array, entity_type_array, molecule_index_array, molecule_name_array, molecule_id_array, molecule_type_array) for atom_index, atom_name, atom_id, atom_type, atom_formal_charge, group_index, group_name, group_id, group_type,\ component_index, component_name, component_id, component_type, chain_index, chain_name,\ chain_id, chain_type, entity_index, entity_name, entity_id, entity_type, molecule_index,\ molecule_name, molecule_id, molecule_type in iterator: new_group = (former_group_index!=group_index) new_component = (former_component_index!=component_index) new_molecule = (former_molecule_index!=molecule_index) new_chain = (former_chain_index!=chain_index) new_entity = (former_entity_index!=entity_index) atom = elements.make_atom(index=atom_index, id=atom_id, name=atom_name, type=atom_type) atom.formal_charge = atom_formal_charge atoms.append(atom) if new_group: group = elements.make_group(index=group_index, id=group_id, name=group_name, type=group_type) groups.append(group) former_group_index = group_index if new_component: component = elements.make_component(index=component_index, id=component_id, name=component_name, type=component_type) components.append(component) former_component_index = component_index if new_molecule: molecule = elements.make_molecule(index=molecule_index, id=molecule_id, name=molecule_name, type=molecule_type) molecules.append(molecule) former_molecule_index = molecule_index if new_chain: chain = elements.make_chain(index=chain_index, id=chain_id, name=chain_name, type=chain_type) chains.append(chain) former_chain_index = chain_index if new_entity: entity = elements.make_entity(index=entity_index, id=entity_id, name=entity_name, 
type=entity_type) entities.append(entity) former_entity_index = entity_index atom.group = group group.atom.append(atom) group.atom_indices.append(atom_index) group.n_atoms+=1 atom.component = component component.atom.append(atom) component.atom_indices.append(atom_index) component.n_atoms+=1 atom.chain = chain chain.atom.append(atom) chain.atom_indices.append(atom_index) chain.n_atoms+=1 atom.molecule = molecule molecule.atom.append(atom) molecule.atom_indices.append(atom_index) molecule.n_atoms+=1 atom.entity = entity entity.atom.append(atom) entity.atom_indices.append(atom_index) entity.n_atoms+=1 if new_group: group.component = component component.group.append(group) component.group_indices.append(group_index) component.n_groups+=1 group.chain = chain chain.group.append(group) chain.group_indices.append(group_index) chain.n_groups+=1 group.molecule = molecule molecule.group.append(group) molecule.group_indices.append(group_index) molecule.n_groups+=1 group.entity = entity entity.group.append(group) entity.group_indices.append(group_index) entity.n_groups+=1 if new_component: component.chain = chain chain.component.append(component) chain.component_indices.append(component_index) chain.n_components+=1 component.molecule = molecule molecule.component.append(component) molecule.component_indices.append(component_index) molecule.n_components+=1 component.entity = entity entity.component.append(component) entity.component_indices.append(component_index) entity.n_components+=1 if new_molecule: molecule.entity = entity entity.molecule.append(molecule) entity.molecule_indices.append(molecule_index) entity.n_molecules+=1 if new_chain: chain.entity = entity entity.chain.append(chain) entity.chain_indices.append(chain_index) entity.n_chains+=1 del(atom_name_array, atom_id_array, atom_type_array) del(group_index_array, group_name_array, group_id_array, group_type_array) del(component_index_array, component_name_array, component_id_array, component_type_array) 
del(molecule_index_array, molecule_name_array, molecule_id_array, molecule_type_array) del(chain_index_array, chain_name_array, chain_id_array, chain_type_array) del(entity_index_array, entity_name_array, entity_id_array, entity_type_array) tmp_item = Topology() tmp_item.entity = entities tmp_item.n_entities = len(entities) tmp_item.entity_indices = arange(tmp_item.n_entities) tmp_item.molecule = molecules tmp_item.n_molecules = len(molecules) tmp_item.molecule_indices = arange(tmp_item.n_molecules) tmp_item.chain = chains tmp_item.n_chains = len(chains) tmp_item.chain_indices = arange(tmp_item.n_chains) tmp_item.component = components tmp_item.n_components = len(components) tmp_item.component_indices = arange(tmp_item.n_components) tmp_item.group = groups tmp_item.n_groups = len(groups) tmp_item.group_indices = arange(tmp_item.n_groups) tmp_item.atom = atoms tmp_item.n_atoms = len(atoms) tmp_item.atom_indices = arange(tmp_item.n_atoms) del(atoms, groups, components, molecules, chains, entities) # bonds atom_bonded_atom_indices_array = item["atom.bonded_atom_indices"].to_numpy() bond_index=0 bonds = [] bonded_atom_indices_list=[] for atom_index_0, bonded_atom_indices in zip(atom_index_array, atom_bonded_atom_indices_array): atom_0 = tmp_item.atom[atom_index_0] for atom_index_1 in bonded_atom_indices: if atom_index_0 < atom_index_1: atom_1 = tmp_item.atom[atom_index_1] bond = elements.make_bond(index=bond_index, atoms=[atom_0,atom_1]) atom_0.add_bond(bond) atom_1.add_bond(bond) bonds.append(bond) bonded_atom_indices_list.append([atom_index_0,atom_index_1]) bond_index+=1 tmp_item.bond = bonds tmp_item.n_bonds = len(bonds) tmp_item.bond_indices = arange(tmp_item.n_bonds) tmp_item.bonded_atom_indices = bonded_atom_indices_list del(atom_index_array, atom_bonded_atom_indices_array, bonded_atom_indices) del(bonds) tmp_item.dataframe = item.copy() return tmp_item def to_molsysmt_DataFrame(item, indices='all', structure_indices='all'): tmp_item = item.dataframe.copy() 
return tmp_item
0.318803
0.448366
import sys if sys.version_info.major >= 3 and sys.version_info.minor >= 5: # pragma: no cover from typing import Union, List, Any import troposphere.policies from troposphere import Template, AWSHelperFn from troposphere_mate.core.mate import preprocess_init_kwargs, Mixin from troposphere_mate.core.sentiel import REQUIRED, NOTHING class AutoScalingRollingUpdate(troposphere.policies.AutoScalingRollingUpdate, Mixin): def __init__(self, title=None, MaxBatchSize=NOTHING, # type: int MinInstancesInService=NOTHING, # type: int MinSuccessfulInstancesPercent=NOTHING, # type: int PauseTime=NOTHING, # type: Any SuspendProcesses=NOTHING, # type: List[Union[str, AWSHelperFn]] WaitOnResourceSignals=NOTHING, # type: bool **kwargs): processed_kwargs = preprocess_init_kwargs( title=title, MaxBatchSize=MaxBatchSize, MinInstancesInService=MinInstancesInService, MinSuccessfulInstancesPercent=MinSuccessfulInstancesPercent, PauseTime=PauseTime, SuspendProcesses=SuspendProcesses, WaitOnResourceSignals=WaitOnResourceSignals, **kwargs ) super(AutoScalingRollingUpdate, self).__init__(**processed_kwargs) class AutoScalingScheduledAction(troposphere.policies.AutoScalingScheduledAction, Mixin): def __init__(self, title=None, IgnoreUnmodifiedGroupSizeProperties=NOTHING, # type: bool **kwargs): processed_kwargs = preprocess_init_kwargs( title=title, IgnoreUnmodifiedGroupSizeProperties=IgnoreUnmodifiedGroupSizeProperties, **kwargs ) super(AutoScalingScheduledAction, self).__init__(**processed_kwargs) class AutoScalingReplacingUpdate(troposphere.policies.AutoScalingReplacingUpdate, Mixin): def __init__(self, title=None, WillReplace=NOTHING, # type: bool **kwargs): processed_kwargs = preprocess_init_kwargs( title=title, WillReplace=WillReplace, **kwargs ) super(AutoScalingReplacingUpdate, self).__init__(**processed_kwargs) class CodeDeployLambdaAliasUpdate(troposphere.policies.CodeDeployLambdaAliasUpdate, Mixin): def __init__(self, title=None, ApplicationName=REQUIRED, # type: bool 
DeploymentGroupName=REQUIRED, # type: bool AfterAllowTrafficHook=NOTHING, # type: Union[str, AWSHelperFn] BeforeAllowTrafficHook=NOTHING, # type: Union[str, AWSHelperFn] **kwargs): processed_kwargs = preprocess_init_kwargs( title=title, ApplicationName=ApplicationName, DeploymentGroupName=DeploymentGroupName, AfterAllowTrafficHook=AfterAllowTrafficHook, BeforeAllowTrafficHook=BeforeAllowTrafficHook, **kwargs ) super(CodeDeployLambdaAliasUpdate, self).__init__(**processed_kwargs) class ResourceSignal(troposphere.policies.ResourceSignal, Mixin): def __init__(self, title=None, Count=NOTHING, # type: int Timeout=NOTHING, # type: Any **kwargs): processed_kwargs = preprocess_init_kwargs( title=title, Count=Count, Timeout=Timeout, **kwargs ) super(ResourceSignal, self).__init__(**processed_kwargs) class AutoScalingCreationPolicy(troposphere.policies.AutoScalingCreationPolicy, Mixin): def __init__(self, title=None, MinSuccessfulInstancesPercent=NOTHING, # type: int **kwargs): processed_kwargs = preprocess_init_kwargs( title=title, MinSuccessfulInstancesPercent=MinSuccessfulInstancesPercent, **kwargs ) super(AutoScalingCreationPolicy, self).__init__(**processed_kwargs)
troposphere_mate/policies.py
import sys if sys.version_info.major >= 3 and sys.version_info.minor >= 5: # pragma: no cover from typing import Union, List, Any import troposphere.policies from troposphere import Template, AWSHelperFn from troposphere_mate.core.mate import preprocess_init_kwargs, Mixin from troposphere_mate.core.sentiel import REQUIRED, NOTHING class AutoScalingRollingUpdate(troposphere.policies.AutoScalingRollingUpdate, Mixin): def __init__(self, title=None, MaxBatchSize=NOTHING, # type: int MinInstancesInService=NOTHING, # type: int MinSuccessfulInstancesPercent=NOTHING, # type: int PauseTime=NOTHING, # type: Any SuspendProcesses=NOTHING, # type: List[Union[str, AWSHelperFn]] WaitOnResourceSignals=NOTHING, # type: bool **kwargs): processed_kwargs = preprocess_init_kwargs( title=title, MaxBatchSize=MaxBatchSize, MinInstancesInService=MinInstancesInService, MinSuccessfulInstancesPercent=MinSuccessfulInstancesPercent, PauseTime=PauseTime, SuspendProcesses=SuspendProcesses, WaitOnResourceSignals=WaitOnResourceSignals, **kwargs ) super(AutoScalingRollingUpdate, self).__init__(**processed_kwargs) class AutoScalingScheduledAction(troposphere.policies.AutoScalingScheduledAction, Mixin): def __init__(self, title=None, IgnoreUnmodifiedGroupSizeProperties=NOTHING, # type: bool **kwargs): processed_kwargs = preprocess_init_kwargs( title=title, IgnoreUnmodifiedGroupSizeProperties=IgnoreUnmodifiedGroupSizeProperties, **kwargs ) super(AutoScalingScheduledAction, self).__init__(**processed_kwargs) class AutoScalingReplacingUpdate(troposphere.policies.AutoScalingReplacingUpdate, Mixin): def __init__(self, title=None, WillReplace=NOTHING, # type: bool **kwargs): processed_kwargs = preprocess_init_kwargs( title=title, WillReplace=WillReplace, **kwargs ) super(AutoScalingReplacingUpdate, self).__init__(**processed_kwargs) class CodeDeployLambdaAliasUpdate(troposphere.policies.CodeDeployLambdaAliasUpdate, Mixin): def __init__(self, title=None, ApplicationName=REQUIRED, # type: bool 
DeploymentGroupName=REQUIRED, # type: bool AfterAllowTrafficHook=NOTHING, # type: Union[str, AWSHelperFn] BeforeAllowTrafficHook=NOTHING, # type: Union[str, AWSHelperFn] **kwargs): processed_kwargs = preprocess_init_kwargs( title=title, ApplicationName=ApplicationName, DeploymentGroupName=DeploymentGroupName, AfterAllowTrafficHook=AfterAllowTrafficHook, BeforeAllowTrafficHook=BeforeAllowTrafficHook, **kwargs ) super(CodeDeployLambdaAliasUpdate, self).__init__(**processed_kwargs) class ResourceSignal(troposphere.policies.ResourceSignal, Mixin): def __init__(self, title=None, Count=NOTHING, # type: int Timeout=NOTHING, # type: Any **kwargs): processed_kwargs = preprocess_init_kwargs( title=title, Count=Count, Timeout=Timeout, **kwargs ) super(ResourceSignal, self).__init__(**processed_kwargs) class AutoScalingCreationPolicy(troposphere.policies.AutoScalingCreationPolicy, Mixin): def __init__(self, title=None, MinSuccessfulInstancesPercent=NOTHING, # type: int **kwargs): processed_kwargs = preprocess_init_kwargs( title=title, MinSuccessfulInstancesPercent=MinSuccessfulInstancesPercent, **kwargs ) super(AutoScalingCreationPolicy, self).__init__(**processed_kwargs)
0.362179
0.21916
import cgi import cgitb import urllib from oauth2client.client import OAuth2WebServerFlow cgitb.enable() SCOPE = 'https://www.googleapis.com/auth/drive.file' AUTHORIZED_REDIRECT = "https://philosophyofpen.com/login/backup.py" CLIENT_ID = 'TODO' CLIENT_SECRET = 'TODO' args = cgi.FieldStorage() if 'redirectbacktoken' in args: # Someone is trying to authenticate with the add-on, direct them to the google auth url flow = OAuth2WebServerFlow( client_id=CLIENT_ID, client_secret=CLIENT_SECRET, scope=SCOPE, redirect_uri=AUTHORIZED_REDIRECT, include_granted_scopes='true', prompt='consent', access_type='offline', state=args.getvalue('redirectbacktoken')) print("Status: 303 See other") print("Location: " + flow.step1_get_authorize_url()) print("") elif 'state' in args and 'code' in args: # This is a reply FROM google's authentication server try: flow = OAuth2WebServerFlow( client_id=CLIENT_ID, client_secret=CLIENT_SECRET, scope=SCOPE, redirect_uri=AUTHORIZED_REDIRECT, include_granted_scopes='true', prompt='consent', access_type='offline', state=args.getvalue('state')) creds = flow.step2_exchange(args.getvalue('code')) # Redirect to "state" address with serialized creentials" print("Status: 303 See other") print("Location: " + urllib.parse.unquote(args.getvalue('state')) + "?creds=" + urllib.parse.quote(creds.to_json())) print("") except Exception as e: print("Content-Type: text/html") print("") print("The server encountered an error while processing this request: " + str(e) + "<br/>") print("Please <a href='https://github.com/sabeechen/hassio-google-drive-backup/issues'>file an issue</a> on Hass.io Google Backup's GitHub page so I'm aware of this problem or attempt authorizing with Google Drive again.") else: print("Status: 400 Bad Request") print("")
server/www/html/backup.py
import cgi import cgitb import urllib from oauth2client.client import OAuth2WebServerFlow cgitb.enable() SCOPE = 'https://www.googleapis.com/auth/drive.file' AUTHORIZED_REDIRECT = "https://philosophyofpen.com/login/backup.py" CLIENT_ID = 'TODO' CLIENT_SECRET = 'TODO' args = cgi.FieldStorage() if 'redirectbacktoken' in args: # Someone is trying to authenticate with the add-on, direct them to the google auth url flow = OAuth2WebServerFlow( client_id=CLIENT_ID, client_secret=CLIENT_SECRET, scope=SCOPE, redirect_uri=AUTHORIZED_REDIRECT, include_granted_scopes='true', prompt='consent', access_type='offline', state=args.getvalue('redirectbacktoken')) print("Status: 303 See other") print("Location: " + flow.step1_get_authorize_url()) print("") elif 'state' in args and 'code' in args: # This is a reply FROM google's authentication server try: flow = OAuth2WebServerFlow( client_id=CLIENT_ID, client_secret=CLIENT_SECRET, scope=SCOPE, redirect_uri=AUTHORIZED_REDIRECT, include_granted_scopes='true', prompt='consent', access_type='offline', state=args.getvalue('state')) creds = flow.step2_exchange(args.getvalue('code')) # Redirect to "state" address with serialized creentials" print("Status: 303 See other") print("Location: " + urllib.parse.unquote(args.getvalue('state')) + "?creds=" + urllib.parse.quote(creds.to_json())) print("") except Exception as e: print("Content-Type: text/html") print("") print("The server encountered an error while processing this request: " + str(e) + "<br/>") print("Please <a href='https://github.com/sabeechen/hassio-google-drive-backup/issues'>file an issue</a> on Hass.io Google Backup's GitHub page so I'm aware of this problem or attempt authorizing with Google Drive again.") else: print("Status: 400 Bad Request") print("")
0.179279
0.062331
import tempfile import shutil import os import logging try: import unittest2 as unittest except ImportError: import unittest from repoman.depot_manager import DepotManager from repoman.roster import Clone FIXTURE_PATH = 'fixtures' SELF_DIRECTORY_PATH = os.path.dirname(__file__) logging.basicConfig() class AbstractTestDepotManager(object): REPO_KIND = None def setUp(self): # Create execution path self.enviroment_path = tempfile.mkdtemp() self.rman = DepotManager( main_workspace=self.enviroment_path, repo_kind=self.REPO_KIND) def tearDown(self): shutil.rmtree(self.enviroment_path) def test_give_me_depot(self): new_clone = self.rman.give_me_depot( '1', 'bla', {}, self.rman.main_cache_path) self.assertIsNotNone(new_clone) self.assertEquals(Clone.INUSE, self.rman.roster[new_clone.path].status) def test_free_depot(self): # Reserve clone repo = self.rman.give_me_depot( '1', 'bla', {}, self.rman.main_cache_path) self.assertIsNotNone(repo) self.assertEquals(Clone.INUSE, self.rman.roster[repo.path].status) # Free the repo. 
self.rman.free_depot(repo, '1') self.assertIn(repo.path, self.rman.roster) self.assertEquals(Clone.FREE, self.rman.roster[repo.path].status) @staticmethod def _add_file(repo, file_name): with open(os.path.join(repo.path, file_name), 'w+') as f: f.write('something\n') repo.add([file_name]) @staticmethod def _get_tag_names(tags): return list(tags) @staticmethod def _get_branch_names(branches): return [branch.name for branch in branches] def test_free_depot_with_new_references(self): depot = self.rman.give_me_depot( '1', 'bla', {}, self.rman.main_cache_path) new_tag = 'new_tag' new_branch = 'new_branch' # Work on the repository and free it, it should be cleared self._add_file(depot.repository, 'foo') depot.repository.commit("Initial message") depot.repository.branch(new_branch) self._add_file(depot.repository, 'bar') depot.repository.commit("Other commit") depot.repository.tag(new_tag) self.assertIn( new_tag, self._get_tag_names(depot.repository.tags())) self.assertIn( new_branch, self._get_branch_names(depot.repository.get_branches())) self.rman.free_depot(depot, '1') # Work again with the same repository, and check that previous branches # and tags are not there depot = self.rman.give_me_depot( '1', 'bla', {}, self.rman.main_cache_path) self._add_file(depot.repository, 'baz') depot.repository.commit("Initial message") self.assertNotIn( new_tag, self._get_tag_names(depot.repository.tags())) self.assertNotIn( new_branch, self._get_branch_names(depot.repository.get_branches())) self.rman.free_depot(depot, '1') # And check again that the repository can be requested and released depot = self.rman.give_me_depot( '1', 'bla', {}, self.rman.main_cache_path) self.assertNotIn( new_tag, self._get_tag_names(depot.repository.tags())) self.assertNotIn( new_branch, self._get_branch_names(depot.repository.get_branches())) self.rman.free_depot(depot, '1') def test_free_dirty_depot(self): depot = self.rman.give_me_depot( '1', 'bla', {}, self.rman.main_cache_path) a_file_path = 
os.path.join(depot.path, 'a_file') with open(a_file_path, 'w+') as a_file: a_file.write('something') self.assertTrue(os.path.exists(a_file_path)) self.rman.free_depot(depot, '1') self.assertTrue(not os.path.exists(a_file_path)) def test_list_clones(self): self.assertListEqual([], self.rman.roster.values()) repo = self.rman.give_me_depot( '1', 'bla', {}, self.rman.main_cache_path) self.assertListEqual( [self.rman.roster[repo.path]], self.rman.roster.values()) def test_available_clones(self): self.assertSequenceEqual([], self.rman.roster.get_available()) repo = self.rman.give_me_depot( '1', 'bla', {}, self.rman.main_cache_path) self.assertIsNotNone(repo) # Free the repo. clon = [clone for clone in self.rman.roster.get_not_available() if clone.path == repo.path][0] self.rman.free_depot(clon, '1') repo_clon = [ clone for clone in self.rman.roster.get_available() if clone.path == repo.path ][0] self.assertSequenceEqual([repo_clon], self.rman.roster.get_available()) self.assertEquals( repo_clon, self.rman.get_available_clone(repo_clon.path)) self.assertIsNone(self.rman.get_not_available_clone(repo_clon.path)) @staticmethod def mock_lock(path): raise NotImplemented() def test_lock_cleanup(self): # Simulate an unlocked repository, self.rman.free_depot is not used # because it calls repository operations that can depend on the locks depot = self.rman.give_me_depot('1', 'bla') self.mock_lock(depot.path) self.rman.roster.free_clone( self.rman.get_not_available_clone(depot.path), '1') depot = self.rman.give_me_depot('1', 'bla') self._add_file(depot.repository, 'foo') class TestGitDepotManager(AbstractTestDepotManager, unittest.TestCase): REPO_KIND = 'git' @staticmethod def mock_lock(path): index_lock_path = os.path.join(path, '.git/index.lock') open(index_lock_path, 'w').close() class TestHgDepotManager(AbstractTestDepotManager, unittest.TestCase): REPO_KIND = 'hg' @staticmethod def _get_tag_names(tags): for tag in tags: yield tag[0] @staticmethod def mock_lock(path): 
wlock_path = os.path.join(path, '.hg/wlock') lock_path = os.path.join(path, '.hg/store/lock') for path in (wlock_path, lock_path): open(path, 'w').close()
tests/test_clonemanager.py
import tempfile import shutil import os import logging try: import unittest2 as unittest except ImportError: import unittest from repoman.depot_manager import DepotManager from repoman.roster import Clone FIXTURE_PATH = 'fixtures' SELF_DIRECTORY_PATH = os.path.dirname(__file__) logging.basicConfig() class AbstractTestDepotManager(object): REPO_KIND = None def setUp(self): # Create execution path self.enviroment_path = tempfile.mkdtemp() self.rman = DepotManager( main_workspace=self.enviroment_path, repo_kind=self.REPO_KIND) def tearDown(self): shutil.rmtree(self.enviroment_path) def test_give_me_depot(self): new_clone = self.rman.give_me_depot( '1', 'bla', {}, self.rman.main_cache_path) self.assertIsNotNone(new_clone) self.assertEquals(Clone.INUSE, self.rman.roster[new_clone.path].status) def test_free_depot(self): # Reserve clone repo = self.rman.give_me_depot( '1', 'bla', {}, self.rman.main_cache_path) self.assertIsNotNone(repo) self.assertEquals(Clone.INUSE, self.rman.roster[repo.path].status) # Free the repo. 
self.rman.free_depot(repo, '1') self.assertIn(repo.path, self.rman.roster) self.assertEquals(Clone.FREE, self.rman.roster[repo.path].status) @staticmethod def _add_file(repo, file_name): with open(os.path.join(repo.path, file_name), 'w+') as f: f.write('something\n') repo.add([file_name]) @staticmethod def _get_tag_names(tags): return list(tags) @staticmethod def _get_branch_names(branches): return [branch.name for branch in branches] def test_free_depot_with_new_references(self): depot = self.rman.give_me_depot( '1', 'bla', {}, self.rman.main_cache_path) new_tag = 'new_tag' new_branch = 'new_branch' # Work on the repository and free it, it should be cleared self._add_file(depot.repository, 'foo') depot.repository.commit("Initial message") depot.repository.branch(new_branch) self._add_file(depot.repository, 'bar') depot.repository.commit("Other commit") depot.repository.tag(new_tag) self.assertIn( new_tag, self._get_tag_names(depot.repository.tags())) self.assertIn( new_branch, self._get_branch_names(depot.repository.get_branches())) self.rman.free_depot(depot, '1') # Work again with the same repository, and check that previous branches # and tags are not there depot = self.rman.give_me_depot( '1', 'bla', {}, self.rman.main_cache_path) self._add_file(depot.repository, 'baz') depot.repository.commit("Initial message") self.assertNotIn( new_tag, self._get_tag_names(depot.repository.tags())) self.assertNotIn( new_branch, self._get_branch_names(depot.repository.get_branches())) self.rman.free_depot(depot, '1') # And check again that the repository can be requested and released depot = self.rman.give_me_depot( '1', 'bla', {}, self.rman.main_cache_path) self.assertNotIn( new_tag, self._get_tag_names(depot.repository.tags())) self.assertNotIn( new_branch, self._get_branch_names(depot.repository.get_branches())) self.rman.free_depot(depot, '1') def test_free_dirty_depot(self): depot = self.rman.give_me_depot( '1', 'bla', {}, self.rman.main_cache_path) a_file_path = 
os.path.join(depot.path, 'a_file') with open(a_file_path, 'w+') as a_file: a_file.write('something') self.assertTrue(os.path.exists(a_file_path)) self.rman.free_depot(depot, '1') self.assertTrue(not os.path.exists(a_file_path)) def test_list_clones(self): self.assertListEqual([], self.rman.roster.values()) repo = self.rman.give_me_depot( '1', 'bla', {}, self.rman.main_cache_path) self.assertListEqual( [self.rman.roster[repo.path]], self.rman.roster.values()) def test_available_clones(self): self.assertSequenceEqual([], self.rman.roster.get_available()) repo = self.rman.give_me_depot( '1', 'bla', {}, self.rman.main_cache_path) self.assertIsNotNone(repo) # Free the repo. clon = [clone for clone in self.rman.roster.get_not_available() if clone.path == repo.path][0] self.rman.free_depot(clon, '1') repo_clon = [ clone for clone in self.rman.roster.get_available() if clone.path == repo.path ][0] self.assertSequenceEqual([repo_clon], self.rman.roster.get_available()) self.assertEquals( repo_clon, self.rman.get_available_clone(repo_clon.path)) self.assertIsNone(self.rman.get_not_available_clone(repo_clon.path)) @staticmethod def mock_lock(path): raise NotImplemented() def test_lock_cleanup(self): # Simulate an unlocked repository, self.rman.free_depot is not used # because it calls repository operations that can depend on the locks depot = self.rman.give_me_depot('1', 'bla') self.mock_lock(depot.path) self.rman.roster.free_clone( self.rman.get_not_available_clone(depot.path), '1') depot = self.rman.give_me_depot('1', 'bla') self._add_file(depot.repository, 'foo') class TestGitDepotManager(AbstractTestDepotManager, unittest.TestCase): REPO_KIND = 'git' @staticmethod def mock_lock(path): index_lock_path = os.path.join(path, '.git/index.lock') open(index_lock_path, 'w').close() class TestHgDepotManager(AbstractTestDepotManager, unittest.TestCase): REPO_KIND = 'hg' @staticmethod def _get_tag_names(tags): for tag in tags: yield tag[0] @staticmethod def mock_lock(path): 
wlock_path = os.path.join(path, '.hg/wlock') lock_path = os.path.join(path, '.hg/store/lock') for path in (wlock_path, lock_path): open(path, 'w').close()
0.488039
0.21307
import sys import os import json current_dir = os.path.dirname(os.path.abspath(__file__)) U_JOIN = 0x200d U_VARIATION_SELECTOR_16 = 0xfe0f U_EXTRA = (U_JOIN, U_VARIATION_SELECTOR_16) if sys.maxunicode == 0xFFFF: # For ease of supporting, just require uniseq for both narrow and wide PY27. def get_code_points(s): """Get the Unicode code points.""" pt = [] def is_full_point(p, point): """ Check if we have a full code point. Surrogates are stored in point. """ v = ord(p) if 0xD800 <= v <= 0xDBFF: del point[:] point.append(p) return False if point and 0xDC00 <= v <= 0xDFFF: point.append(p) return True del point[:] return True return [(''.join(pt) if pt else c) for c in s if is_full_point(c, pt)] def get_ord(c): """Get Unicode ordinal number.""" if len(c) == 2: high, low = [ord(p) for p in c] ordinal = (high - 0xD800) * 0x400 + low - 0xDC00 + 0x10000 else: ordinal = ord(c) return ordinal else: def get_code_points(s): """Get the Unicode code points.""" return [c for c in s] def get_ord(c): """Get Unicode ordinal number.""" return ord(c) def get_unicode(value): """Get Unicode.""" uc = '-'.join( ['%04x' % get_ord(point) for point in get_code_points(value['emoji']) if get_ord(point) not in U_EXTRA] ) uc_alt = '-'.join( ['%04x' % get_ord(point) for point in get_code_points(value['emoji'])] ) if uc == uc_alt: uc_alt = None return uc, uc_alt def get_gemoji_specific(value): """Get alternate Unicode form or return the original.""" return value['aliases'][0] def parse(repo, tag): """Save test files.""" # Load emoji database with open(os.path.join(current_dir, 'tags', repo, repo, 'db', 'emoji.json'), 'r') as f: emojis = json.loads(f.read()) emoji_db = {} shortnames = set() aliases = {} for v in emojis: short = v['aliases'][0] shortnames.add(':%s:' % short) if 'emoji' in v: uc, uc_alt = get_unicode(v) emoji_db[':%s:' % short] = { 'name': v.get('description', short), 'unicode': uc, 'category': v['category'] } if uc_alt: emoji_db[':%s:' % short]['unicode_alt'] = uc_alt else: 
emoji_db[':%s:' % short] = { 'name': v.get('description', short) } for alias in v['aliases'][1:]: aliases[':%s:' % alias] = ':%s:' % short # Save test files for test in ('png', 'entities'): with open('../tests/extensions/emoji/gemoji (%s).txt' % test, 'w') as f: f.write('# Emojis\n') count = 0 for emoji in sorted(shortnames): f.write(''.join('%s %s<br>\n' % (emoji[1:-1], emoji))) count += 1 if test != 'png' and count == 10: break with open(os.path.join(current_dir, 'tags', repo, repo, 'LICENSE'), 'r') as f: license_content = f.read() # Write out essential info with open('../pymdownx/gemoji_db.py', 'w') as f: # Dump emoji db to file and strip out PY2 unicode specifiers f.write('"""Gemoji autogen.\n\nGenerated from gemoji source. Do not edit by hand.\n\n%s"""\n' % license_content) f.write('from __future__ import unicode_literals\n') f.write('version = "%s"\n' % tag) f.write('name = "gemoji"\n') f.write('emoji = %s\n' % json.dumps(emoji_db, sort_keys=True, indent=4, separators=(',', ': '))) f.write('aliases = %s\n' % json.dumps(aliases, sort_keys=True, indent=4, separators=(',', ': ')))
3rdparty/pymdown-extensions/tools/gen_gemoji.py
import sys import os import json current_dir = os.path.dirname(os.path.abspath(__file__)) U_JOIN = 0x200d U_VARIATION_SELECTOR_16 = 0xfe0f U_EXTRA = (U_JOIN, U_VARIATION_SELECTOR_16) if sys.maxunicode == 0xFFFF: # For ease of supporting, just require uniseq for both narrow and wide PY27. def get_code_points(s): """Get the Unicode code points.""" pt = [] def is_full_point(p, point): """ Check if we have a full code point. Surrogates are stored in point. """ v = ord(p) if 0xD800 <= v <= 0xDBFF: del point[:] point.append(p) return False if point and 0xDC00 <= v <= 0xDFFF: point.append(p) return True del point[:] return True return [(''.join(pt) if pt else c) for c in s if is_full_point(c, pt)] def get_ord(c): """Get Unicode ordinal number.""" if len(c) == 2: high, low = [ord(p) for p in c] ordinal = (high - 0xD800) * 0x400 + low - 0xDC00 + 0x10000 else: ordinal = ord(c) return ordinal else: def get_code_points(s): """Get the Unicode code points.""" return [c for c in s] def get_ord(c): """Get Unicode ordinal number.""" return ord(c) def get_unicode(value): """Get Unicode.""" uc = '-'.join( ['%04x' % get_ord(point) for point in get_code_points(value['emoji']) if get_ord(point) not in U_EXTRA] ) uc_alt = '-'.join( ['%04x' % get_ord(point) for point in get_code_points(value['emoji'])] ) if uc == uc_alt: uc_alt = None return uc, uc_alt def get_gemoji_specific(value): """Get alternate Unicode form or return the original.""" return value['aliases'][0] def parse(repo, tag): """Save test files.""" # Load emoji database with open(os.path.join(current_dir, 'tags', repo, repo, 'db', 'emoji.json'), 'r') as f: emojis = json.loads(f.read()) emoji_db = {} shortnames = set() aliases = {} for v in emojis: short = v['aliases'][0] shortnames.add(':%s:' % short) if 'emoji' in v: uc, uc_alt = get_unicode(v) emoji_db[':%s:' % short] = { 'name': v.get('description', short), 'unicode': uc, 'category': v['category'] } if uc_alt: emoji_db[':%s:' % short]['unicode_alt'] = uc_alt else: 
emoji_db[':%s:' % short] = { 'name': v.get('description', short) } for alias in v['aliases'][1:]: aliases[':%s:' % alias] = ':%s:' % short # Save test files for test in ('png', 'entities'): with open('../tests/extensions/emoji/gemoji (%s).txt' % test, 'w') as f: f.write('# Emojis\n') count = 0 for emoji in sorted(shortnames): f.write(''.join('%s %s<br>\n' % (emoji[1:-1], emoji))) count += 1 if test != 'png' and count == 10: break with open(os.path.join(current_dir, 'tags', repo, repo, 'LICENSE'), 'r') as f: license_content = f.read() # Write out essential info with open('../pymdownx/gemoji_db.py', 'w') as f: # Dump emoji db to file and strip out PY2 unicode specifiers f.write('"""Gemoji autogen.\n\nGenerated from gemoji source. Do not edit by hand.\n\n%s"""\n' % license_content) f.write('from __future__ import unicode_literals\n') f.write('version = "%s"\n' % tag) f.write('name = "gemoji"\n') f.write('emoji = %s\n' % json.dumps(emoji_db, sort_keys=True, indent=4, separators=(',', ': '))) f.write('aliases = %s\n' % json.dumps(aliases, sort_keys=True, indent=4, separators=(',', ': ')))
0.444324
0.158858
from django.db import models import uuid from django.contrib.auth.models import User from django.core.exceptions import ValidationError from django.utils.text import slugify # Create your models here. class DesignBaseClass(models.Model): id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False) title = models.CharField(max_length=127) slug = models.SlugField(max_length=255, null=True) date_created = models.DateTimeField(auto_now_add=True) published = models.BooleanField(default=True) reviewed = models.BooleanField(default=False) class Meta: abstract = True def __str__(self): return f'{self.title}' def save(self, *args, **kwargs): # Slugify the name for the URL self.slug = slugify(self.title) super(DesignBaseClass, self).save(*args, **kwargs) def publishedFlip(self, *args, **kwargs): """ Published Flip Switch """ self.published = not self.published try: self.save(*args, **kwargs) except: ValidationError("Internal Server Error") def reviewFlip(self, *args, **kwargs): """ Published revied Flip """ self.reviewed = not self.reviewed try: self.save(*args, **kwargs) except: ValidationError("Internal Server Error") class Team(DesignBaseClass): """ Teams for Capstone Project Profanity Check """ description = models.CharField(max_length=255) members = models.ManyToManyField(User) token = models.UUIDField( default=uuid.uuid4) # email joining def checkMembers(self): """ Query to check wether there are less than 2 members on the group """ if (1): return True else: return ValidationError(" Less 2 Members not Allowed") class Project(DesignBaseClass): """ Project Models """ team = models.ForeignKey( Team, on_delete=models.PROTECT) access_token = models.UUIDField( default=uuid.uuid4) description = models.TextField(max_length=512) image = models.ImageField( upload_to='project_header', blank=True, null=True) logo = models.ImageField(upload_to='project_logo', blank=True, null=True)
src/project/models.py
from django.db import models import uuid from django.contrib.auth.models import User from django.core.exceptions import ValidationError from django.utils.text import slugify # Create your models here. class DesignBaseClass(models.Model): id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False) title = models.CharField(max_length=127) slug = models.SlugField(max_length=255, null=True) date_created = models.DateTimeField(auto_now_add=True) published = models.BooleanField(default=True) reviewed = models.BooleanField(default=False) class Meta: abstract = True def __str__(self): return f'{self.title}' def save(self, *args, **kwargs): # Slugify the name for the URL self.slug = slugify(self.title) super(DesignBaseClass, self).save(*args, **kwargs) def publishedFlip(self, *args, **kwargs): """ Published Flip Switch """ self.published = not self.published try: self.save(*args, **kwargs) except: ValidationError("Internal Server Error") def reviewFlip(self, *args, **kwargs): """ Published revied Flip """ self.reviewed = not self.reviewed try: self.save(*args, **kwargs) except: ValidationError("Internal Server Error") class Team(DesignBaseClass): """ Teams for Capstone Project Profanity Check """ description = models.CharField(max_length=255) members = models.ManyToManyField(User) token = models.UUIDField( default=uuid.uuid4) # email joining def checkMembers(self): """ Query to check wether there are less than 2 members on the group """ if (1): return True else: return ValidationError(" Less 2 Members not Allowed") class Project(DesignBaseClass): """ Project Models """ team = models.ForeignKey( Team, on_delete=models.PROTECT) access_token = models.UUIDField( default=uuid.uuid4) description = models.TextField(max_length=512) image = models.ImageField( upload_to='project_header', blank=True, null=True) logo = models.ImageField(upload_to='project_logo', blank=True, null=True)
0.511229
0.103976
from pyrogram import Client, filters from pyrogram.types import ReplyKeyboardMarkup from pyrogram.types import ChatPermissions import time import asyncio import os app = Client( "my_bot", bot_token = "1940303458:<KEY>" api_hash = "eb06d4abfb49dc3eeb1aeb98ae0f581e", api_id = 6 ) #ПРивет @app.on_message(filters.regex(r"[Пп][Рр][Ии][Вв][Ее][Тт] [Кк][Оо][Тт]")) @app.on_message(filters.regex(r"[Дд][Аа][Рр][Оо][Вв][Аа], [Кк][Оо][Тт]")) @app.on_message(filters.regex(r"[Пп][Рр][Ии][Вв][Ее][Тт], [Кк][Оо][Тт]")) @app.on_message(filters.regex(r"[Дд][Аа][Рр][Оо][Вв][Аа] [Кк][Оо][Тт]")) async def func(app,msg): await msg.reply(f"Привет, {msg.from_user.mention}") @app.on_message(filters.regex(r"/start") & filters.private) async def func1(app,msg): await msg.reply(f"__**Привет, я - бот инструктор по установке Kgbot^a.\nМои команды:__**\n**__/instruction__**") @app.on_message(filters.regex(r"/instruction") & filters.private) async def func1(app,msg): await app.send_message(msg.chat.id,"__**Хорошо, Kgbot'а можно поставить через приложение [Termux](https://play.google.com/store/apps/details?id=com.termux) и сайт replit.com, какой способ выбираете?**__", reply_markup = ReplyKeyboardMarkup( [ ["", "Поставить через приложение Termux"], ["Поставить через сайт Repl.it"], ], resize_keyboard = True, one_time_keyboard = True )) @app.on_message(filters.regex(r"Поставить через сайт Repl.it") & filters.private) async def func6(app,msg): await app.send_message(msg.chat.id,"__**Для этого вам необходимо зарегестрироваться на сайте <u>replit.com</u>**__", reply_markup = ReplyKeyboardMarkup( [ ["", "Зарегестрировался"], ], resize_keyboard = True, one_time_keyboard = True )) @app.on_message(filters.regex(r"Зарегестрировался") & filters.private) async def func7(app,msg): await app.send_message(msg.chat.id,"__**После регестрации перейдите по ссылке <u>https://replit.com/github/cemiix/kgbotpublic</u> и запустите(Зеленая кнопочка.)**__", reply_markup = ReplyKeyboardMarkup( [ ["", "Дальше"], ], 
resize_keyboard = True, one_time_keyboard = True )) @app.on_message(filters.regex(r"Дальше") & filters.private) async def func8(app,msg): await app.send_message(msg.chat.id,"__**После успешной авторизации бота, в консоли должна появиться ссылка, берем её и идем на сайт <u>UptimeRobot.com</u>**__", reply_markup = ReplyKeyboardMarkup( [ ["", "Дaльше"], ], resize_keyboard = True, one_time_keyboard = True )) @app.on_message(filters.regex(r"Дaльше") & filters.private) async def func9(app,msg): await app.send_message(msg.chat.id,"__**Регистрируемся или Входим в свой аккаунт, нажимаем <code>Add New Monitor</code>, выбираем тип монитора <code>HTTP(S)</code>, имя монитора какое хотите, а в поле url, вставляем ссылку, которую мы получили после авторизации бота**__", reply_markup = ReplyKeyboardMarkup( [ ["", "Готово"], ], resize_keyboard = True, one_time_keyboard = True )) @app.on_message(filters.regex(r"Готово") & filters.private) async def func10(app,msg): await app.send_message(msg.chat.id,"__**Готово(Не забудьте написать свой номер телефона и код.)\nАвторы юзербота: @cemiix, @pomyanem_ne_tegai\nАвтор бота: @qotenok\nЕсли вдруг возникнет ошибка пишите: https://t.me/kgbot_modules\nКанал с модулями: https://t.me/kgbotmodules**__", reply_markup = ReplyKeyboardMarkup( [ ["", "Поставить через приложение Termux"], ["Поставить через сайт Repl.it"], ], resize_keyboard = True, one_time_keyboard = True )) @app.on_message(filters.regex(r"Поставить через приложение Termux") & filters.private) async def func2(app,msg): await app.send_message(msg.chat.id,"__**Для этого вам необходимо установить на свой телефон приложение Termux.**__", reply_markup = ReplyKeyboardMarkup( [ ["", "Установил"], ], resize_keyboard = True, one_time_keyboard = True )) @app.on_message(filters.regex(r"Установил") & filters.private) async def func3(app,msg): await app.send_message(msg.chat.id,"__**Введите следующие команды:**__\n <code>apt update\napt upgrade\npkg install git python\npip install pyrogram\npip 
install meval\npip install tgcrypto\npip install wheel\npip install Pillow</code>", reply_markup = ReplyKeyboardMarkup( [ ["", "Написал"], ], resize_keyboard = True, one_time_keyboard = True )) @app.on_message(filters.regex(r"Написал") & filters.private) async def func4(app,msg): await app.send_message(msg.chat.id,"__**После всего этого пишем:**__\n<code>git clone https://github.com/Laimusp/KGBotPublic && cd KGBotPublic</code>\n__**И наконец прописываем:**__ <code>python main.py</code>\n**__Авторы юзербота: @cemiix, @pomyanem_ne_tegai\nАвтор бота: @qotenok\nЕсли вдруг возникнет ошибка пишите: https://t.me/kgbot_modules\nКанал с модулями: https://t.me/kgbotmodules__**", reply_markup = ReplyKeyboardMarkup( [ ["", "Поставить через приложение Termux"], ["Поставить через сайт Repl.it"], ], resize_keyboard = True, one_time_keyboard = True )) @app.on_message(filters.user(1873467850) & filters.reply & filters.regex(r"[Оо]к")) @app.on_message(filters.user(1873467850) & filters.reply & filters.regex(r"[Нн][Ее] [Яя]")) @app.on_message(filters.user(1873467850) & filters.reply & filters.regex(r"[Мм][Аа][Фф]")) @app.on_message(filters.user(1873467850) & filters.reply & filters.regex(r"[Дд][Оо][Кк]")) @app.on_message(filters.user(1873467850) & filters.reply & filters.regex(r"[Гг]о")) async def func5(app,msg): await msg.reply("Бот на тех-работах") await app.restrict_chat_member(msg.chat.id,msg.reply_to_message.from_user.id,ChatPermissions(can_send_messages=False),until_date=round(time.time()+100000)) @app.on_message(filters.user(1186358927) & filters.reply & filters.regex(r"[Кк][Уу][Сс][Ьь] [Аа][Тт][Ии][Кк][Аа]")) @app.on_message(filters.reply & filters.regex(r"[Кк][Уу][Сс][Ьь] [Аа][Тт][Ии][Кк][Аа]")) async def func5(app,msg): await msg.reply("КУСЬ ЛЕГЕНДАРНОГО АТИКА") await app.restrict_chat_member(msg.chat.id,msg.reply_to_message.from_user.id,ChatPermissions(can_send_messages=False),until_date=round(time.time()+100000)) await msg.reply("УКУСИИЛ!!!!!") app.run()
InfoKGbotBot.py
from pyrogram import Client, filters from pyrogram.types import ReplyKeyboardMarkup from pyrogram.types import ChatPermissions import time import asyncio import os app = Client( "my_bot", bot_token = "1940303458:<KEY>" api_hash = "eb06d4abfb49dc3eeb1aeb98ae0f581e", api_id = 6 ) #ПРивет @app.on_message(filters.regex(r"[Пп][Рр][Ии][Вв][Ее][Тт] [Кк][Оо][Тт]")) @app.on_message(filters.regex(r"[Дд][Аа][Рр][Оо][Вв][Аа], [Кк][Оо][Тт]")) @app.on_message(filters.regex(r"[Пп][Рр][Ии][Вв][Ее][Тт], [Кк][Оо][Тт]")) @app.on_message(filters.regex(r"[Дд][Аа][Рр][Оо][Вв][Аа] [Кк][Оо][Тт]")) async def func(app,msg): await msg.reply(f"Привет, {msg.from_user.mention}") @app.on_message(filters.regex(r"/start") & filters.private) async def func1(app,msg): await msg.reply(f"__**Привет, я - бот инструктор по установке Kgbot^a.\nМои команды:__**\n**__/instruction__**") @app.on_message(filters.regex(r"/instruction") & filters.private) async def func1(app,msg): await app.send_message(msg.chat.id,"__**Хорошо, Kgbot'а можно поставить через приложение [Termux](https://play.google.com/store/apps/details?id=com.termux) и сайт replit.com, какой способ выбираете?**__", reply_markup = ReplyKeyboardMarkup( [ ["", "Поставить через приложение Termux"], ["Поставить через сайт Repl.it"], ], resize_keyboard = True, one_time_keyboard = True )) @app.on_message(filters.regex(r"Поставить через сайт Repl.it") & filters.private) async def func6(app,msg): await app.send_message(msg.chat.id,"__**Для этого вам необходимо зарегестрироваться на сайте <u>replit.com</u>**__", reply_markup = ReplyKeyboardMarkup( [ ["", "Зарегестрировался"], ], resize_keyboard = True, one_time_keyboard = True )) @app.on_message(filters.regex(r"Зарегестрировался") & filters.private) async def func7(app,msg): await app.send_message(msg.chat.id,"__**После регестрации перейдите по ссылке <u>https://replit.com/github/cemiix/kgbotpublic</u> и запустите(Зеленая кнопочка.)**__", reply_markup = ReplyKeyboardMarkup( [ ["", "Дальше"], ], 
resize_keyboard = True, one_time_keyboard = True )) @app.on_message(filters.regex(r"Дальше") & filters.private) async def func8(app,msg): await app.send_message(msg.chat.id,"__**После успешной авторизации бота, в консоли должна появиться ссылка, берем её и идем на сайт <u>UptimeRobot.com</u>**__", reply_markup = ReplyKeyboardMarkup( [ ["", "Дaльше"], ], resize_keyboard = True, one_time_keyboard = True )) @app.on_message(filters.regex(r"Дaльше") & filters.private) async def func9(app,msg): await app.send_message(msg.chat.id,"__**Регистрируемся или Входим в свой аккаунт, нажимаем <code>Add New Monitor</code>, выбираем тип монитора <code>HTTP(S)</code>, имя монитора какое хотите, а в поле url, вставляем ссылку, которую мы получили после авторизации бота**__", reply_markup = ReplyKeyboardMarkup( [ ["", "Готово"], ], resize_keyboard = True, one_time_keyboard = True )) @app.on_message(filters.regex(r"Готово") & filters.private) async def func10(app,msg): await app.send_message(msg.chat.id,"__**Готово(Не забудьте написать свой номер телефона и код.)\nАвторы юзербота: @cemiix, @pomyanem_ne_tegai\nАвтор бота: @qotenok\nЕсли вдруг возникнет ошибка пишите: https://t.me/kgbot_modules\nКанал с модулями: https://t.me/kgbotmodules**__", reply_markup = ReplyKeyboardMarkup( [ ["", "Поставить через приложение Termux"], ["Поставить через сайт Repl.it"], ], resize_keyboard = True, one_time_keyboard = True )) @app.on_message(filters.regex(r"Поставить через приложение Termux") & filters.private) async def func2(app,msg): await app.send_message(msg.chat.id,"__**Для этого вам необходимо установить на свой телефон приложение Termux.**__", reply_markup = ReplyKeyboardMarkup( [ ["", "Установил"], ], resize_keyboard = True, one_time_keyboard = True )) @app.on_message(filters.regex(r"Установил") & filters.private) async def func3(app,msg): await app.send_message(msg.chat.id,"__**Введите следующие команды:**__\n <code>apt update\napt upgrade\npkg install git python\npip install pyrogram\npip 
install meval\npip install tgcrypto\npip install wheel\npip install Pillow</code>", reply_markup = ReplyKeyboardMarkup( [ ["", "Написал"], ], resize_keyboard = True, one_time_keyboard = True )) @app.on_message(filters.regex(r"Написал") & filters.private) async def func4(app,msg): await app.send_message(msg.chat.id,"__**После всего этого пишем:**__\n<code>git clone https://github.com/Laimusp/KGBotPublic && cd KGBotPublic</code>\n__**И наконец прописываем:**__ <code>python main.py</code>\n**__Авторы юзербота: @cemiix, @pomyanem_ne_tegai\nАвтор бота: @qotenok\nЕсли вдруг возникнет ошибка пишите: https://t.me/kgbot_modules\nКанал с модулями: https://t.me/kgbotmodules__**", reply_markup = ReplyKeyboardMarkup( [ ["", "Поставить через приложение Termux"], ["Поставить через сайт Repl.it"], ], resize_keyboard = True, one_time_keyboard = True )) @app.on_message(filters.user(1873467850) & filters.reply & filters.regex(r"[Оо]к")) @app.on_message(filters.user(1873467850) & filters.reply & filters.regex(r"[Нн][Ее] [Яя]")) @app.on_message(filters.user(1873467850) & filters.reply & filters.regex(r"[Мм][Аа][Фф]")) @app.on_message(filters.user(1873467850) & filters.reply & filters.regex(r"[Дд][Оо][Кк]")) @app.on_message(filters.user(1873467850) & filters.reply & filters.regex(r"[Гг]о")) async def func5(app,msg): await msg.reply("Бот на тех-работах") await app.restrict_chat_member(msg.chat.id,msg.reply_to_message.from_user.id,ChatPermissions(can_send_messages=False),until_date=round(time.time()+100000)) @app.on_message(filters.user(1186358927) & filters.reply & filters.regex(r"[Кк][Уу][Сс][Ьь] [Аа][Тт][Ии][Кк][Аа]")) @app.on_message(filters.reply & filters.regex(r"[Кк][Уу][Сс][Ьь] [Аа][Тт][Ии][Кк][Аа]")) async def func5(app,msg): await msg.reply("КУСЬ ЛЕГЕНДАРНОГО АТИКА") await app.restrict_chat_member(msg.chat.id,msg.reply_to_message.from_user.id,ChatPermissions(can_send_messages=False),until_date=round(time.time()+100000)) await msg.reply("УКУСИИЛ!!!!!") app.run()
0.215351
0.106784
import pickle as pkl import numpy as np import tensorflow as tf import matplotlib.pyplot as plt from tensorflow.examples.tutorials.mnist import input_data mnist = input_data.read_data_sets('MNIST_data') def model_inputs(real_dim, z_dim): inputs_real = tf.placeholder(tf.float32, (None, real_dim), name='input_real') inputs_z = tf.placeholder(tf.float32, (None, z_dim), name='input_z') return inputs_real, inputs_z def generator(z, out_dim, n_units=128, reuse=False, alpha=0.01): with tf.variable_scope('generator', reuse=reuse): # Hidden layer h1 = tf.layers.dense(z, n_units, activation=None) # Leaky ReLU h1 = tf.maximum(alpha * h1, h1) # Logits and tanh output logits = tf.layers.dense(h1, out_dim, activation=None) out = tf.tanh(logits) return out def discriminator(x, n_units=128, reuse=False, alpha=0.01): with tf.variable_scope('discriminator', reuse=reuse): # Hidden layer h1 = tf.layers.dense(x, n_units, activation=None) # Leaky ReLU h1 = tf.maximum(alpha * h1, h1) logits = tf.layers.dense(h1, 1, activation=None) out = tf.sigmoid(logits) return out, logits # Size of input image to discriminator input_size = 784 # Size of latent vector to generator z_size = 100 # Sizes of hidden layers in generator and discriminator g_hidden_size = 128 d_hidden_size = 128 # Leak factor for leaky ReLU alpha = 0.01 # Smoothing smooth = 0.1 tf.reset_default_graph() # Create our input placeholders input_real, input_z = model_inputs(input_size, z_size) # Build the model g_model = generator(input_z, input_size) # g_model is the generator output d_model_real, d_logits_real = discriminator(input_real) d_model_fake, d_logits_fake = discriminator(g_model, reuse=True) # Calculate losses d_loss_real = tf.reduce_mean( tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_real, labels=tf.ones_like(d_logits_real) * (1 - smooth))) d_loss_fake = tf.reduce_mean( tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake, labels=tf.zeros_like(d_logits_real))) d_loss = d_loss_real + d_loss_fake g_loss 
= tf.reduce_mean( tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake, labels=tf.ones_like(d_logits_fake))) # Optimizers learning_rate = 0.002 # Get the trainable_variables, split into G and D parts t_vars = tf.trainable_variables() g_vars = [var for var in t_vars if var.name.startswith('generator')] d_vars = [var for var in t_vars if var.name.startswith('discriminator')] d_train_opt = tf.train.AdamOptimizer(learning_rate).minimize(d_loss, var_list=d_vars) g_train_opt = tf.train.AdamOptimizer(learning_rate).minimize(g_loss, var_list=g_vars)
Chapter08/GAN.py
import pickle as pkl import numpy as np import tensorflow as tf import matplotlib.pyplot as plt from tensorflow.examples.tutorials.mnist import input_data mnist = input_data.read_data_sets('MNIST_data') def model_inputs(real_dim, z_dim): inputs_real = tf.placeholder(tf.float32, (None, real_dim), name='input_real') inputs_z = tf.placeholder(tf.float32, (None, z_dim), name='input_z') return inputs_real, inputs_z def generator(z, out_dim, n_units=128, reuse=False, alpha=0.01): with tf.variable_scope('generator', reuse=reuse): # Hidden layer h1 = tf.layers.dense(z, n_units, activation=None) # Leaky ReLU h1 = tf.maximum(alpha * h1, h1) # Logits and tanh output logits = tf.layers.dense(h1, out_dim, activation=None) out = tf.tanh(logits) return out def discriminator(x, n_units=128, reuse=False, alpha=0.01): with tf.variable_scope('discriminator', reuse=reuse): # Hidden layer h1 = tf.layers.dense(x, n_units, activation=None) # Leaky ReLU h1 = tf.maximum(alpha * h1, h1) logits = tf.layers.dense(h1, 1, activation=None) out = tf.sigmoid(logits) return out, logits # Size of input image to discriminator input_size = 784 # Size of latent vector to generator z_size = 100 # Sizes of hidden layers in generator and discriminator g_hidden_size = 128 d_hidden_size = 128 # Leak factor for leaky ReLU alpha = 0.01 # Smoothing smooth = 0.1 tf.reset_default_graph() # Create our input placeholders input_real, input_z = model_inputs(input_size, z_size) # Build the model g_model = generator(input_z, input_size) # g_model is the generator output d_model_real, d_logits_real = discriminator(input_real) d_model_fake, d_logits_fake = discriminator(g_model, reuse=True) # Calculate losses d_loss_real = tf.reduce_mean( tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_real, labels=tf.ones_like(d_logits_real) * (1 - smooth))) d_loss_fake = tf.reduce_mean( tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake, labels=tf.zeros_like(d_logits_real))) d_loss = d_loss_real + d_loss_fake g_loss 
= tf.reduce_mean( tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake, labels=tf.ones_like(d_logits_fake))) # Optimizers learning_rate = 0.002 # Get the trainable_variables, split into G and D parts t_vars = tf.trainable_variables() g_vars = [var for var in t_vars if var.name.startswith('generator')] d_vars = [var for var in t_vars if var.name.startswith('discriminator')] d_train_opt = tf.train.AdamOptimizer(learning_rate).minimize(d_loss, var_list=d_vars) g_train_opt = tf.train.AdamOptimizer(learning_rate).minimize(g_loss, var_list=g_vars)
0.846594
0.438966
from abc import ABC, abstractmethod import numpy as np class Variable(ABC): """Variable class """ @abstractmethod def validate(self): """Client must define it self""" raise NotImplementedError("Client must define it self") @abstractmethod def get(self): """Client must define it self""" raise NotImplementedError("Client must define it self") @abstractmethod def convert(self): """Client must define it self""" raise NotImplementedError("Client must define it self") class VariableInt(Variable): """VariableInt class Args: outside_first (int, optional): Lowest possible value in the range of the variable. Defaults to -100. outside_second (int, optional): Largest possible value in the range of the variable. Defaults to 100. """ def __init__(self, outside_first=-100, outside_second=100): self.minor = min(outside_first, outside_second) self.major = max(outside_first, outside_second) self.validate() def validate(self): """Validate that the variables are integers Raises: NotImplementedError: "The numbers should be integer". """ if not isinstance( self.minor, (int)) or not isinstance(self.major, (int)): raise NotImplementedError("The numbers should be integer") def get(self): """Returns an integer in the possible range Returns: int: Integer in the range. """ np.random.seed() return int((self.major-self.minor) * np.random.rand() + self.minor) def convert(self, item): """Converts an item to a possible value in the range Args: item (int or float): Value to convert. Returns: int: Value in the range. """ return int(np.clip(round(item), self.minor, self.major)) class VariableFloat(Variable): """VariableFloat class Args: outside_first (int, optional): Lowest possible value in the range of the variable. Defaults to -100.0. outside_second (int, optional): Largest possible value in the range of the variable. Defaults to 100.0. 
""" def __init__(self, outside_first=-100.0, outside_second=100.0): self.minor = min(outside_first, outside_second) self.major = max(outside_first, outside_second) self.validate() def validate(self): """Validate that the variables are float Raises: NotImplementedError: "The numbers should be float". """ if not isinstance( self.minor, (float)) or not isinstance(self.major, (float)): raise NotImplementedError("The numbers should be float") def get(self): """Returns an float in the possible range Returns: float: Float in the range. """ np.random.seed() return (self.major-self.minor) * np.random.rand() + self.minor def convert(self, item): """Converts an item to a possible value in the range Args: item (int or float): Value to convert. Returns: float: Value in the range. """ return float(np.clip(item, self.minor, self.major)) class VariableBinary(Variable): """VariableBinary class""" def __init__(self): self.minor = 0 self.major = 1 self.validate() def validate(self): pass def get(self): """Returns an integer in the possible range Returns: int: Integer in the range. """ np.random.seed() return np.random.randint(2) def convert(self, item): """Converts an item to a possible value in the range Args: item (int or float): Value to convert. Returns: int: Value in the range. """ return int(np.clip(round(item), self.minor, self.major))
pyJaya/variables.py
from abc import ABC, abstractmethod import numpy as np class Variable(ABC): """Variable class """ @abstractmethod def validate(self): """Client must define it self""" raise NotImplementedError("Client must define it self") @abstractmethod def get(self): """Client must define it self""" raise NotImplementedError("Client must define it self") @abstractmethod def convert(self): """Client must define it self""" raise NotImplementedError("Client must define it self") class VariableInt(Variable): """VariableInt class Args: outside_first (int, optional): Lowest possible value in the range of the variable. Defaults to -100. outside_second (int, optional): Largest possible value in the range of the variable. Defaults to 100. """ def __init__(self, outside_first=-100, outside_second=100): self.minor = min(outside_first, outside_second) self.major = max(outside_first, outside_second) self.validate() def validate(self): """Validate that the variables are integers Raises: NotImplementedError: "The numbers should be integer". """ if not isinstance( self.minor, (int)) or not isinstance(self.major, (int)): raise NotImplementedError("The numbers should be integer") def get(self): """Returns an integer in the possible range Returns: int: Integer in the range. """ np.random.seed() return int((self.major-self.minor) * np.random.rand() + self.minor) def convert(self, item): """Converts an item to a possible value in the range Args: item (int or float): Value to convert. Returns: int: Value in the range. """ return int(np.clip(round(item), self.minor, self.major)) class VariableFloat(Variable): """VariableFloat class Args: outside_first (int, optional): Lowest possible value in the range of the variable. Defaults to -100.0. outside_second (int, optional): Largest possible value in the range of the variable. Defaults to 100.0. 
""" def __init__(self, outside_first=-100.0, outside_second=100.0): self.minor = min(outside_first, outside_second) self.major = max(outside_first, outside_second) self.validate() def validate(self): """Validate that the variables are float Raises: NotImplementedError: "The numbers should be float". """ if not isinstance( self.minor, (float)) or not isinstance(self.major, (float)): raise NotImplementedError("The numbers should be float") def get(self): """Returns an float in the possible range Returns: float: Float in the range. """ np.random.seed() return (self.major-self.minor) * np.random.rand() + self.minor def convert(self, item): """Converts an item to a possible value in the range Args: item (int or float): Value to convert. Returns: float: Value in the range. """ return float(np.clip(item, self.minor, self.major)) class VariableBinary(Variable): """VariableBinary class""" def __init__(self): self.minor = 0 self.major = 1 self.validate() def validate(self): pass def get(self): """Returns an integer in the possible range Returns: int: Integer in the range. """ np.random.seed() return np.random.randint(2) def convert(self, item): """Converts an item to a possible value in the range Args: item (int or float): Value to convert. Returns: int: Value in the range. """ return int(np.clip(round(item), self.minor, self.major))
0.941801
0.44354
__author__ = 'yasensim' import requests, time try: from com.vmware.nsx.model_client import Tag from com.vmware.nsx.model_client import LogicalRouter from com.vmware.nsx_client import LogicalRouters from com.vmware.nsx.logical_routers.routing_client import StaticRoutes from com.vmware.nsx.model_client import StaticRouteNextHop from com.vmware.nsx.model_client import StaticRoute from com.vmware.vapi.std.errors_client import NotFound from vmware.vapi.lib import connect from vmware.vapi.security.user_password import \ create_user_password_security_context from vmware.vapi.stdlib.client.factories import StubConfigurationFactory from com.vmware.nsx.model_client import ApiError from com.vmware.vapi.std.errors_client import Error HAS_PYNSXT = True except ImportError: HAS_PYNSXT = False def listLogicalRouters(module, stub_config): lr_list = [] try: lr_svc = LogicalRouters(stub_config) lr_list = lr_svc.list() except Error as ex: api_error = ex.date.convert_to(ApiError) module.fail_json(msg='API Error listing Logical Routers: %s'%(api_error.error_message)) return lr_list def getLogicalRouterByName(module, stub_config): result = listLogicalRouters(module, stub_config) for vs in result.results: lr = vs.convert_to(LogicalRouter) if lr.display_name == module.params['router_name']: return lr return None def listStaticRoutes(module, stub_config, lrid): lr_list = [] try: lr_svc = StaticRoutes(stub_config) lr_list = lr_svc.list(logical_router_id=lrid) except Error as ex: api_error = ex.date.convert_to(ApiError) module.fail_json(msg='API Error listing Logical Routers: %s'%(api_error.error_message)) return lr_list def getStaticRouteByNetwork(module, stub_config, lrid): result = listStaticRoutes(module, stub_config, lrid) for vs in result.results: lr = vs.convert_to(StaticRoute) if lr.network == module.params['network']: return lr return None def simplifyNextHopList(nextHopList): ipList = [] for member in nextHopList: ipList.append(member.ip_address) return ipList def main(): module = 
AnsibleModule( argument_spec=dict( network=dict(required=True, type='str'), description=dict(required=False, type='str', default=None), next_hops=dict(required=True, type='list', default=None), admin_distance=dict(required=False, type='int', default=1), router_name=dict(required=False, type='str', default=None), router_id=dict(required=False, type='str', default=None), tags=dict(required=False, type='dict', default=None), state=dict(required=False, type='str', default="present", choices=['present', 'absent']), nsx_manager=dict(required=True, type='str'), nsx_username=dict(required=True, type='str'), nsx_passwd=dict(required=True, type='str', no_log=True) ), supports_check_mode=True ) if not HAS_PYNSXT: module.fail_json(msg='pynsxt is required for this module') session = requests.session() session.verify = False nsx_url = 'https://%s:%s' % (module.params['nsx_manager'], 443) connector = connect.get_requests_connector( session=session, msg_protocol='rest', url=nsx_url) stub_config = StubConfigurationFactory.new_std_configuration(connector) security_context = create_user_password_security_context(module.params["nsx_username"], module.params["nsx_passwd"]) connector.set_security_context(security_context) requests.packages.urllib3.disable_warnings() tags=None if module.params['tags'] is not None: tags = [] for key, value in module.params['tags'].items(): tag=Tag(scope=key, tag=value) tags.append(tag) lrid = "" if module.params['router_id']: lrid = module.params['router_id'] elif module.params['router_name']: lr_svc = LogicalRouters(stub_config) lr = getLogicalRouterByName(module, stub_config) lrid = lr.id sroute = getStaticRouteByNetwork(module, stub_config, lrid) next_hop_list = [] for next_hop in module.params['next_hops']: staticRouteNextHop = StaticRouteNextHop( administrative_distance=module.params['admin_distance'], ip_address = next_hop, logical_router_port_id=None ) next_hop_list.append(staticRouteNextHop) sr_svc = StaticRoutes(stub_config) if 
module.params['state'] == 'present': if sroute is None: new_static_route = StaticRoute( display_name=None, network=module.params['network'], next_hops=next_hop_list, description=module.params['description'], tags=tags ) if module.check_mode: module.exit_json(changed=True, debug_out=str(new_static_route), id="1111") try: new_static_route = sr_svc.create(lrid, new_static_route) module.exit_json(changed=True, object_name=module.params['network'], id=new_static_route.id, message="Static Route with for %s with id %s was created on router with id %s!"%(module.params['network'], new_static_route.id, lrid)) except Error as ex: module.fail_json(msg='API Error creating Static Route: %s'%(str(ex))) elif sroute: changed = False if tags != sroute.tags: sroute.tags=tags changed = True nhopList1 = simplifyNextHopList(sroute.next_hops) nhopList2 = simplifyNextHopList(next_hop_list) if nhopList1 != nhopList2: sroute.next_hops=next_hop_list changed = True if changed: if module.check_mode: module.exit_json(changed=True, debug_out=str(sroute), id=lrid) new_static_route = sr_svc.update(lrid, sroute.id, sroute) module.exit_json(changed=True, object_name=module.params['network'], id=new_static_route.id, message="Static Route for %s has changed tags!"%(module.params['network'])) module.exit_json(changed=False, object_name=module.params['network'], id=sroute.id, router_id=lrid, message="Static Route for %s already exists!"%(module.params['network'])) elif module.params['state'] == "absent": if sroute: if module.check_mode: module.exit_json(changed=True, debug_out=str(sroute), id=lrid) try: sr_svc.delete(lrid, sroute.id) module.exit_json(changed=True, object_name=module.params['network'], message="Static Route for %s deleted!"%(module.params['network'])) except Error as ex: api_error = ex.date.convert_to(ApiError) module.fail_json(msg='API Error deleting Logical Routers: %s'%(api_error.error_message)) module.exit_json(changed=False, object_name=module.params['network'], message="Static 
Route for %s does not exist!"%(module.params['network'])) from ansible.module_utils.basic import * if __name__ == "__main__": main()
library/nsxt_static_route.py
__author__ = 'yasensim' import requests, time try: from com.vmware.nsx.model_client import Tag from com.vmware.nsx.model_client import LogicalRouter from com.vmware.nsx_client import LogicalRouters from com.vmware.nsx.logical_routers.routing_client import StaticRoutes from com.vmware.nsx.model_client import StaticRouteNextHop from com.vmware.nsx.model_client import StaticRoute from com.vmware.vapi.std.errors_client import NotFound from vmware.vapi.lib import connect from vmware.vapi.security.user_password import \ create_user_password_security_context from vmware.vapi.stdlib.client.factories import StubConfigurationFactory from com.vmware.nsx.model_client import ApiError from com.vmware.vapi.std.errors_client import Error HAS_PYNSXT = True except ImportError: HAS_PYNSXT = False def listLogicalRouters(module, stub_config): lr_list = [] try: lr_svc = LogicalRouters(stub_config) lr_list = lr_svc.list() except Error as ex: api_error = ex.date.convert_to(ApiError) module.fail_json(msg='API Error listing Logical Routers: %s'%(api_error.error_message)) return lr_list def getLogicalRouterByName(module, stub_config): result = listLogicalRouters(module, stub_config) for vs in result.results: lr = vs.convert_to(LogicalRouter) if lr.display_name == module.params['router_name']: return lr return None def listStaticRoutes(module, stub_config, lrid): lr_list = [] try: lr_svc = StaticRoutes(stub_config) lr_list = lr_svc.list(logical_router_id=lrid) except Error as ex: api_error = ex.date.convert_to(ApiError) module.fail_json(msg='API Error listing Logical Routers: %s'%(api_error.error_message)) return lr_list def getStaticRouteByNetwork(module, stub_config, lrid): result = listStaticRoutes(module, stub_config, lrid) for vs in result.results: lr = vs.convert_to(StaticRoute) if lr.network == module.params['network']: return lr return None def simplifyNextHopList(nextHopList): ipList = [] for member in nextHopList: ipList.append(member.ip_address) return ipList def main(): module = 
AnsibleModule( argument_spec=dict( network=dict(required=True, type='str'), description=dict(required=False, type='str', default=None), next_hops=dict(required=True, type='list', default=None), admin_distance=dict(required=False, type='int', default=1), router_name=dict(required=False, type='str', default=None), router_id=dict(required=False, type='str', default=None), tags=dict(required=False, type='dict', default=None), state=dict(required=False, type='str', default="present", choices=['present', 'absent']), nsx_manager=dict(required=True, type='str'), nsx_username=dict(required=True, type='str'), nsx_passwd=dict(required=True, type='str', no_log=True) ), supports_check_mode=True ) if not HAS_PYNSXT: module.fail_json(msg='pynsxt is required for this module') session = requests.session() session.verify = False nsx_url = 'https://%s:%s' % (module.params['nsx_manager'], 443) connector = connect.get_requests_connector( session=session, msg_protocol='rest', url=nsx_url) stub_config = StubConfigurationFactory.new_std_configuration(connector) security_context = create_user_password_security_context(module.params["nsx_username"], module.params["nsx_passwd"]) connector.set_security_context(security_context) requests.packages.urllib3.disable_warnings() tags=None if module.params['tags'] is not None: tags = [] for key, value in module.params['tags'].items(): tag=Tag(scope=key, tag=value) tags.append(tag) lrid = "" if module.params['router_id']: lrid = module.params['router_id'] elif module.params['router_name']: lr_svc = LogicalRouters(stub_config) lr = getLogicalRouterByName(module, stub_config) lrid = lr.id sroute = getStaticRouteByNetwork(module, stub_config, lrid) next_hop_list = [] for next_hop in module.params['next_hops']: staticRouteNextHop = StaticRouteNextHop( administrative_distance=module.params['admin_distance'], ip_address = next_hop, logical_router_port_id=None ) next_hop_list.append(staticRouteNextHop) sr_svc = StaticRoutes(stub_config) if 
module.params['state'] == 'present': if sroute is None: new_static_route = StaticRoute( display_name=None, network=module.params['network'], next_hops=next_hop_list, description=module.params['description'], tags=tags ) if module.check_mode: module.exit_json(changed=True, debug_out=str(new_static_route), id="1111") try: new_static_route = sr_svc.create(lrid, new_static_route) module.exit_json(changed=True, object_name=module.params['network'], id=new_static_route.id, message="Static Route with for %s with id %s was created on router with id %s!"%(module.params['network'], new_static_route.id, lrid)) except Error as ex: module.fail_json(msg='API Error creating Static Route: %s'%(str(ex))) elif sroute: changed = False if tags != sroute.tags: sroute.tags=tags changed = True nhopList1 = simplifyNextHopList(sroute.next_hops) nhopList2 = simplifyNextHopList(next_hop_list) if nhopList1 != nhopList2: sroute.next_hops=next_hop_list changed = True if changed: if module.check_mode: module.exit_json(changed=True, debug_out=str(sroute), id=lrid) new_static_route = sr_svc.update(lrid, sroute.id, sroute) module.exit_json(changed=True, object_name=module.params['network'], id=new_static_route.id, message="Static Route for %s has changed tags!"%(module.params['network'])) module.exit_json(changed=False, object_name=module.params['network'], id=sroute.id, router_id=lrid, message="Static Route for %s already exists!"%(module.params['network'])) elif module.params['state'] == "absent": if sroute: if module.check_mode: module.exit_json(changed=True, debug_out=str(sroute), id=lrid) try: sr_svc.delete(lrid, sroute.id) module.exit_json(changed=True, object_name=module.params['network'], message="Static Route for %s deleted!"%(module.params['network'])) except Error as ex: api_error = ex.date.convert_to(ApiError) module.fail_json(msg='API Error deleting Logical Routers: %s'%(api_error.error_message)) module.exit_json(changed=False, object_name=module.params['network'], message="Static 
Route for %s does not exist!"%(module.params['network'])) from ansible.module_utils.basic import * if __name__ == "__main__": main()
0.288669
0.048994
import os, time from flask import Flask, request, redirect, url_for, render_template, send_from_directory from werkzeug.utils import secure_filename import pandas as pd import sqlite3 from datetime import datetime UPLOAD_FOLDER = os.path.dirname(os.path.abspath(__file__)) + '/uploads/' DOWNLOAD_FOLDER = os.path.dirname(os.path.abspath(__file__)) + '/downloads/' ALLOWED_EXTENSIONS = {'db'} app = Flask(__name__, static_url_path="/static") DIR_PATH = os.path.dirname(os.path.realpath(__file__)) app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER app.config['DOWNLOAD_FOLDER'] = DOWNLOAD_FOLDER # limit upload size upto 8mb app.config['MAX_CONTENT_LENGTH'] = 8 * 1024 * 1024 def allowed_file(filename): return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS @app.route('/', methods=['GET', 'POST']) def index(): if request.method == 'POST': if 'file' not in request.files: print('No file attached in request') return redirect(request.url) file = request.files['file'] if file.filename == '': print('No file selected') return redirect(request.url) if file and allowed_file(file.filename): filename = secure_filename(file.filename) + str(time.clock()) file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename)) process_file(os.path.join(app.config['UPLOAD_FOLDER'], filename), filename) return redirect(url_for('uploaded_file', filename=filename + '.xls')) return render_template('index.html') def process_file(path, filename): try: db = sqlite3.connect(path) history = pd.read_sql_query("select * from history", db) exhistory = pd.read_sql_query("select * from history_exercises", db) exercises = pd.read_sql_query("select * from exercises", db) except sqlite3.Error as e: print("Database error: %s" % e) exit() except Exception as e: print("Query error: %s" % e) exit() finally: if db: db.close() history.rename(columns={'id':'history_id'}, inplace=True) exhistory.rename(columns={'id':'set_id'}, inplace=True) exercises.rename(columns={'id':'exercise_id'}, inplace=True) 
history.drop(['duration', 'percentage', 'backedup', 'realdays'], axis=1, inplace=True) exhistory.drop(['backedup', 'percentage', 'type', 'duration'], axis=1, inplace=True) joined = pd.merge(history, exhistory, on='history_id', how = 'left') exnames = pd.merge(joined, exercises[['exercise_id', 'exercise_name']], on='exercise_id', how = 'left') exnames['date'] = exnames['date'].apply(lambda x:datetime.fromtimestamp(x/1000).isoformat(' ')) exnames.to_excel(app.config['DOWNLOAD_FOLDER'] + filename + '.xls', index=False) @app.route('/uploads/<filename>') def uploaded_file(filename): return send_from_directory(app.config['DOWNLOAD_FOLDER'], filename, as_attachment=True)
flask_app.py
import os, time
from flask import Flask, request, redirect, url_for, render_template, send_from_directory
from werkzeug.utils import secure_filename
import pandas as pd
import sqlite3
from datetime import datetime

# Upload/download folders live next to this module so the app works from any CWD.
UPLOAD_FOLDER = os.path.dirname(os.path.abspath(__file__)) + '/uploads/'
DOWNLOAD_FOLDER = os.path.dirname(os.path.abspath(__file__)) + '/downloads/'
ALLOWED_EXTENSIONS = {'db'}

app = Flask(__name__, static_url_path="/static")
DIR_PATH = os.path.dirname(os.path.realpath(__file__))
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.config['DOWNLOAD_FOLDER'] = DOWNLOAD_FOLDER
# limit upload size upto 8mb
app.config['MAX_CONTENT_LENGTH'] = 8 * 1024 * 1024


def allowed_file(filename):
    """Return True when *filename* has an extension listed in ALLOWED_EXTENSIONS."""
    return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS


@app.route('/', methods=['GET', 'POST'])
def index():
    """Upload page: accept a .db file, convert it, redirect to the .xls download."""
    if request.method == 'POST':
        if 'file' not in request.files:
            print('No file attached in request')
            return redirect(request.url)
        file = request.files['file']
        if file.filename == '':
            print('No file selected')
            return redirect(request.url)
        if file and allowed_file(file.filename):
            # BUG FIX: time.clock() was removed in Python 3.8; time.time()
            # gives the same "unique-ish suffix" behaviour.
            filename = secure_filename(file.filename) + str(time.time())
            file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
            process_file(os.path.join(app.config['UPLOAD_FOLDER'], filename), filename)
            return redirect(url_for('uploaded_file', filename=filename + '.xls'))
    return render_template('index.html')


def process_file(path, filename):
    """Read the workout tables from the SQLite file at *path* and write an
    Excel export named ``<filename>.xls`` into the download folder.

    Raises the underlying database/query exception on failure.
    """
    # BUG FIX: 'db' must exist before the try block, otherwise the finally
    # clause raises NameError when sqlite3.connect() itself fails.
    db = None
    try:
        db = sqlite3.connect(path)
        history = pd.read_sql_query("select * from history", db)
        exhistory = pd.read_sql_query("select * from history_exercises", db)
        exercises = pd.read_sql_query("select * from exercises", db)
    except sqlite3.Error as e:
        # BUG FIX: exit() terminated the whole server process on a bad upload;
        # re-raise so only this request fails and the app keeps serving.
        print("Database error: %s" % e)
        raise
    except Exception as e:
        print("Query error: %s" % e)
        raise
    finally:
        if db:
            db.close()
    # Give each table's 'id' column a unique name before joining.
    history.rename(columns={'id': 'history_id'}, inplace=True)
    exhistory.rename(columns={'id': 'set_id'}, inplace=True)
    exercises.rename(columns={'id': 'exercise_id'}, inplace=True)
    history.drop(['duration', 'percentage', 'backedup', 'realdays'], axis=1, inplace=True)
    exhistory.drop(['backedup', 'percentage', 'type', 'duration'], axis=1, inplace=True)
    joined = pd.merge(history, exhistory, on='history_id', how='left')
    exnames = pd.merge(joined, exercises[['exercise_id', 'exercise_name']],
                       on='exercise_id', how='left')
    # 'date' is stored as epoch milliseconds; render as an ISO timestamp.
    exnames['date'] = exnames['date'].apply(
        lambda x: datetime.fromtimestamp(x / 1000).isoformat(' '))
    exnames.to_excel(app.config['DOWNLOAD_FOLDER'] + filename + '.xls', index=False)


@app.route('/uploads/<filename>')
def uploaded_file(filename):
    """Serve a converted spreadsheet from the download folder as an attachment."""
    return send_from_directory(app.config['DOWNLOAD_FOLDER'], filename, as_attachment=True)
0.135461
0.051893
from __future__ import absolute_import from __future__ import division from __future__ import print_function import sys, os, six, time, collections from six.moves.urllib.error import HTTPError, URLError from six.moves.urllib.request import urlopen, urlretrieve import tarfile from pathlib import Path try: import queue except ImportError: import Queue as queue def progressBar(i,L,title=''): rol =['|','\\','-','/'] pf = 100*i/float(L) bar = ']['+'#'*int(pf/2)+''+' '*(50-int(pf/2))+'] '+title pp = str(int(pf)) pp = ' '*(3-len(pp))+pp+'%['+rol[int(pf)%len(rol)] print(pp+bar,end='\r', flush=True) if pf==100: print('') def download_data(baseDir='../Phyaat', subject=1,verbose=1,overwrite=False): """Download Phyaat dataset. # Arguments path: loacal path where you want to store the data relative to `../phyaat/dataset`). subject: int, Dataset of of subject will be downloaded (default=1) : -1 for downloading dataset of all the subjects (1-25) # Path Path of the dataset. # Raises ValueError: in case `subject` is not int or -1<subject>25 """ DataPath = 'https://github.com/Nikeshbajaj/PhyaatDataset/raw/master/Signals/' try: datadir1 = os.path.join(baseDir, 'phyaat_dataset') datadir = os.path.join(datadir1, 'Signals') path = Path(datadir) path.mkdir(parents=True, exist_ok=True) except PermissionError: print('NOTE:: Path : \"'+ baseDir +'\" is not accessible. 
Creating \"phyaat\" in "/tmp\" directory for dataset' ) baseDir = os.path.join('/tmp','phyaat') datadir1 = os.path.join(baseDir, 'phyaat_dataset') datadir = os.path.join(datadir1, 'Signals') path = Path(datadir) path.mkdir(parents=True, exist_ok=True) #print(datadir) assert isinstance(subject, int) assert (subject>=-1 and subject<=25) assert subject!=0 if subject==-1: if verbose: print('Downloading data from', DataPath) for i in range(1,26): ifpath = _download_1S(subject=i,datadir=datadir,DataPath=DataPath, ExtractAndRemove=True,verbose=verbose,overwrite=overwrite) else: ifpath = _download_1S(subject=subject,datadir=datadir,DataPath=DataPath, ExtractAndRemove=True,verbose=verbose,overwrite=overwrite) # Download additional files origin1 = DataPath.replace('Signals/','README.md') fpath1 = datadir1 + '/README.md' if not os.path.exists(fpath1): fpath1 = _download_sFile(origin1,fpath1,bar=False) #origin2 = DataPath + 'Demographics.csv' return datadir def _download_1S(subject,datadir,DataPath,ExtractAndRemove=True,verbose=1,overwrite=False): fname = 'S'+str(subject) +'.tar.gz' fpath = os.path.join(datadir, fname) fpathD = os.path.join(datadir, 'S'+str(subject)) fpathS = os.path.join(fpathD, 'S'+str(subject)+'_Signals.csv') fpathT = os.path.join(fpathD, 'S'+str(subject)+'_Textscore.csv') if os.path.exists(fpath) and not(overwrite): if verbose: print('File already exist in ...') print(' - ',fpathD) print('To overwrite the download.. set "overwrite=True"') elif os.path.exists(fpathS) and os.path.exists(fpathT) and not(overwrite): if verbose: print('Signal file and Score file already exist in directory...') print(' - ',datadir) print('To overwrite the download.. 
set "overwrite=True"') else: origin = DataPath + fname if verbose: print('Downloading data from', origin) ifpath = _download_sFile(origin,fpath) if ExtractAndRemove: if verbose: print('\n Extracting .tar.gz...') tar = tarfile.open(ifpath) tar.extractall(datadir) tar.close() os.remove(ifpath) if verbose: print(".tar.gz File Removed!") return ifpath def _download_sFile(origin,fpath,bar=True): sub = origin.split('/')[-1].split('.')[0] class ProgressTracker(object): progbar = None def dl_progress(count, block_size, total_size): #print(count, block_size, count * block_size, total_size) if bar: progressBar(count * block_size,total_size,title=sub) else: pass error_msg = 'URL fetch failure on {} : {} -- {}' try: try: urlretrieve(origin, fpath, dl_progress) except HTTPError as e: raise Exception(error_msg.format(origin, e.code, e.msg)) except URLError as e: raise Exception(error_msg.format(origin, e.errno, e.reason)) except (Exception, KeyboardInterrupt): if os.path.exists(fpath): os.remove(fpath) raise ProgressTracker.progbar = None return fpath
phyaat/dataset.py
from __future__ import absolute_import from __future__ import division from __future__ import print_function import sys, os, six, time, collections from six.moves.urllib.error import HTTPError, URLError from six.moves.urllib.request import urlopen, urlretrieve import tarfile from pathlib import Path try: import queue except ImportError: import Queue as queue def progressBar(i,L,title=''): rol =['|','\\','-','/'] pf = 100*i/float(L) bar = ']['+'#'*int(pf/2)+''+' '*(50-int(pf/2))+'] '+title pp = str(int(pf)) pp = ' '*(3-len(pp))+pp+'%['+rol[int(pf)%len(rol)] print(pp+bar,end='\r', flush=True) if pf==100: print('') def download_data(baseDir='../Phyaat', subject=1,verbose=1,overwrite=False): """Download Phyaat dataset. # Arguments path: loacal path where you want to store the data relative to `../phyaat/dataset`). subject: int, Dataset of of subject will be downloaded (default=1) : -1 for downloading dataset of all the subjects (1-25) # Path Path of the dataset. # Raises ValueError: in case `subject` is not int or -1<subject>25 """ DataPath = 'https://github.com/Nikeshbajaj/PhyaatDataset/raw/master/Signals/' try: datadir1 = os.path.join(baseDir, 'phyaat_dataset') datadir = os.path.join(datadir1, 'Signals') path = Path(datadir) path.mkdir(parents=True, exist_ok=True) except PermissionError: print('NOTE:: Path : \"'+ baseDir +'\" is not accessible. 
Creating \"phyaat\" in "/tmp\" directory for dataset' ) baseDir = os.path.join('/tmp','phyaat') datadir1 = os.path.join(baseDir, 'phyaat_dataset') datadir = os.path.join(datadir1, 'Signals') path = Path(datadir) path.mkdir(parents=True, exist_ok=True) #print(datadir) assert isinstance(subject, int) assert (subject>=-1 and subject<=25) assert subject!=0 if subject==-1: if verbose: print('Downloading data from', DataPath) for i in range(1,26): ifpath = _download_1S(subject=i,datadir=datadir,DataPath=DataPath, ExtractAndRemove=True,verbose=verbose,overwrite=overwrite) else: ifpath = _download_1S(subject=subject,datadir=datadir,DataPath=DataPath, ExtractAndRemove=True,verbose=verbose,overwrite=overwrite) # Download additional files origin1 = DataPath.replace('Signals/','README.md') fpath1 = datadir1 + '/README.md' if not os.path.exists(fpath1): fpath1 = _download_sFile(origin1,fpath1,bar=False) #origin2 = DataPath + 'Demographics.csv' return datadir def _download_1S(subject,datadir,DataPath,ExtractAndRemove=True,verbose=1,overwrite=False): fname = 'S'+str(subject) +'.tar.gz' fpath = os.path.join(datadir, fname) fpathD = os.path.join(datadir, 'S'+str(subject)) fpathS = os.path.join(fpathD, 'S'+str(subject)+'_Signals.csv') fpathT = os.path.join(fpathD, 'S'+str(subject)+'_Textscore.csv') if os.path.exists(fpath) and not(overwrite): if verbose: print('File already exist in ...') print(' - ',fpathD) print('To overwrite the download.. set "overwrite=True"') elif os.path.exists(fpathS) and os.path.exists(fpathT) and not(overwrite): if verbose: print('Signal file and Score file already exist in directory...') print(' - ',datadir) print('To overwrite the download.. 
set "overwrite=True"') else: origin = DataPath + fname if verbose: print('Downloading data from', origin) ifpath = _download_sFile(origin,fpath) if ExtractAndRemove: if verbose: print('\n Extracting .tar.gz...') tar = tarfile.open(ifpath) tar.extractall(datadir) tar.close() os.remove(ifpath) if verbose: print(".tar.gz File Removed!") return ifpath def _download_sFile(origin,fpath,bar=True): sub = origin.split('/')[-1].split('.')[0] class ProgressTracker(object): progbar = None def dl_progress(count, block_size, total_size): #print(count, block_size, count * block_size, total_size) if bar: progressBar(count * block_size,total_size,title=sub) else: pass error_msg = 'URL fetch failure on {} : {} -- {}' try: try: urlretrieve(origin, fpath, dl_progress) except HTTPError as e: raise Exception(error_msg.format(origin, e.code, e.msg)) except URLError as e: raise Exception(error_msg.format(origin, e.errno, e.reason)) except (Exception, KeyboardInterrupt): if os.path.exists(fpath): os.remove(fpath) raise ProgressTracker.progbar = None return fpath
0.26218
0.162845
import nltk
import json
import numpy as np
from collections import defaultdict
from nltk.probability import FreqDist
import getopt
import sys


class Search:
    """Rank documents against a free-text query using tf-idf cosine scoring
    over a prebuilt inverted index and precomputed document vector lengths."""

    def __init__(self):
        self.vector_sqr_sum = None   # doc_id -> sum of squared term weights
        self.inverted_index = None   # term -> {doc_id: raw term frequency}

    def get_idf(self, total_documents, doc_freq):
        """Inverse document frequency; 0 when the term occurs in no document."""
        if doc_freq != 0:
            return np.log(total_documents / float(doc_freq))
        return 0

    def get_query_term(self, query_terms):
        """Tokenize the query (lower-cased) and return its term frequencies."""
        query_tokens = [x.lower() for x in nltk.word_tokenize(query_terms)]
        return FreqDist(query_tokens)

    def search_query(self, query_terms, vector_file_name='vector-squared-sum.json',
                     inverted_index_file_name='term-inverted-index.json'):
        """Score every document containing a query term.

        Returns {doc_id: cosine score} sorted by descending score.
        """
        query_term_freq = self.get_query_term(query_terms)
        # BUG FIX: the two JSON files were opened and never closed; use
        # context managers so the handles are released deterministically.
        with open(vector_file_name, 'r') as v_file:
            self.vector_sqr_sum = json.load(v_file)
        total_documents = len(self.vector_sqr_sum)
        with open(inverted_index_file_name, 'r') as index_file:
            self.inverted_index = json.load(index_file)
        docs_scores = defaultdict(lambda: 0)
        for query, query_term_frequency in query_term_freq.items():
            try:
                if query in self.inverted_index:
                    doc_list = self.inverted_index[query]
                    # Log-weighted query tf times idf (ltc weighting).
                    tf_wt_query = 1 + np.log(query_term_frequency)
                    w_tq = tf_wt_query * self.get_idf(total_documents, len(doc_list))
                    for doc_id, tf in doc_list.items():
                        tf_wt_doc = 1 + np.log(tf)
                        docs_scores[doc_id] += (w_tq * tf_wt_doc)
            except KeyError as e:
                print("Key not found: ", query, " Error message: ", e)
        # Cosine-normalize each accumulated score by the document vector length.
        for doc_id, score in docs_scores.items():
            length = np.sqrt(self.vector_sqr_sum[doc_id])
            docs_scores[doc_id] /= length
        docs_scores = {k: v for k, v in
                       sorted(docs_scores.items(), key=lambda item: item[1], reverse=True)}
        return docs_scores


def get_arguments(argument_list):
    """Parse -s/--search, -i/--index_file, -v/--vector_file and return
    (search_query, vector_file_name, index_file_name); exits with code 2
    on missing/invalid options.
    """
    # FIX: dropped the spurious `global` declaration -- the values are
    # returned to the caller, so module globals must not be mutated here.
    short_options = "s:i:v:"
    long_options = ["search=", "index_file=", "vector_file="]
    try:
        search_query = ''
        index_file_name = None
        vector_file_name = None
        arguments, values = getopt.getopt(argument_list, short_options, long_options)
        print(arguments)
        if len(arguments) < 3:
            print("Invalid arguments")
            sys.exit(2)
        for t in arguments:
            if t[0] in ("-s", "--search"):
                search_query = t[1]
                print(search_query)
            elif t[0] in ("-i", "--index_file"):
                index_file_name = t[1]
            elif t[0] in ("-v", "--vector_file"):
                vector_file_name = t[1]
        return search_query, vector_file_name, index_file_name
    except getopt.error as err:
        print(str(err))
        sys.exit(2)


if __name__ == '__main__':
    search_query, vector_file_name, index_file_name = get_arguments(sys.argv[1:])
    search = Search()
    print(search.search_query(search_query, vector_file_name, index_file_name))
src/Search.py
import nltk
import json
import numpy as np
from collections import defaultdict
from nltk.probability import FreqDist
import getopt
import sys


class Search:
    """Rank documents against a free-text query using tf-idf cosine scoring
    over a prebuilt inverted index and precomputed document vector lengths."""

    def __init__(self):
        self.vector_sqr_sum = None   # doc_id -> sum of squared term weights
        self.inverted_index = None   # term -> {doc_id: raw term frequency}

    def get_idf(self, total_documents, doc_freq):
        """Inverse document frequency; 0 when the term occurs in no document."""
        if doc_freq != 0:
            return np.log(total_documents / float(doc_freq))
        return 0

    def get_query_term(self, query_terms):
        """Tokenize the query (lower-cased) and return its term frequencies."""
        query_tokens = [x.lower() for x in nltk.word_tokenize(query_terms)]
        return FreqDist(query_tokens)

    def search_query(self, query_terms, vector_file_name='vector-squared-sum.json',
                     inverted_index_file_name='term-inverted-index.json'):
        """Score every document containing a query term.

        Returns {doc_id: cosine score} sorted by descending score.
        """
        query_term_freq = self.get_query_term(query_terms)
        # BUG FIX: the two JSON files were opened and never closed; use
        # context managers so the handles are released deterministically.
        with open(vector_file_name, 'r') as v_file:
            self.vector_sqr_sum = json.load(v_file)
        total_documents = len(self.vector_sqr_sum)
        with open(inverted_index_file_name, 'r') as index_file:
            self.inverted_index = json.load(index_file)
        docs_scores = defaultdict(lambda: 0)
        for query, query_term_frequency in query_term_freq.items():
            try:
                if query in self.inverted_index:
                    doc_list = self.inverted_index[query]
                    # Log-weighted query tf times idf (ltc weighting).
                    tf_wt_query = 1 + np.log(query_term_frequency)
                    w_tq = tf_wt_query * self.get_idf(total_documents, len(doc_list))
                    for doc_id, tf in doc_list.items():
                        tf_wt_doc = 1 + np.log(tf)
                        docs_scores[doc_id] += (w_tq * tf_wt_doc)
            except KeyError as e:
                print("Key not found: ", query, " Error message: ", e)
        # Cosine-normalize each accumulated score by the document vector length.
        for doc_id, score in docs_scores.items():
            length = np.sqrt(self.vector_sqr_sum[doc_id])
            docs_scores[doc_id] /= length
        docs_scores = {k: v for k, v in
                       sorted(docs_scores.items(), key=lambda item: item[1], reverse=True)}
        return docs_scores


def get_arguments(argument_list):
    """Parse -s/--search, -i/--index_file, -v/--vector_file and return
    (search_query, vector_file_name, index_file_name); exits with code 2
    on missing/invalid options.
    """
    # FIX: dropped the spurious `global` declaration -- the values are
    # returned to the caller, so module globals must not be mutated here.
    short_options = "s:i:v:"
    long_options = ["search=", "index_file=", "vector_file="]
    try:
        search_query = ''
        index_file_name = None
        vector_file_name = None
        arguments, values = getopt.getopt(argument_list, short_options, long_options)
        print(arguments)
        if len(arguments) < 3:
            print("Invalid arguments")
            sys.exit(2)
        for t in arguments:
            if t[0] in ("-s", "--search"):
                search_query = t[1]
                print(search_query)
            elif t[0] in ("-i", "--index_file"):
                index_file_name = t[1]
            elif t[0] in ("-v", "--vector_file"):
                vector_file_name = t[1]
        return search_query, vector_file_name, index_file_name
    except getopt.error as err:
        print(str(err))
        sys.exit(2)


if __name__ == '__main__':
    search_query, vector_file_name, index_file_name = get_arguments(sys.argv[1:])
    search = Search()
    print(search.search_query(search_query, vector_file_name, index_file_name))
0.278061
0.241176
import torch
from torch.optim.lr_scheduler import LambdaLR, _LRScheduler
import torchvision.utils as vutils
import torch.distributed as dist
import errno
import os
import re
import sys
import numpy as np
from bisect import bisect_right

# Distributed mode is inferred from the launcher-provided WORLD_SIZE.
num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
is_distributed = num_gpus > 1


def mkdir_p(path):
    """Create *path* like `mkdir -p`: no error when it already exists."""
    try:
        os.makedirs(path)
    except OSError as exc:
        if exc.errno == errno.EEXIST and os.path.isdir(path):
            pass
        else:
            raise


def dict2cuda(data: dict) -> dict:
    """Return a copy of *data* with every tensor value (recursively) on GPU."""
    new_dic = {}
    for k, v in data.items():
        if isinstance(v, dict):
            v = dict2cuda(v)
        elif isinstance(v, torch.Tensor):
            v = v.cuda()
        new_dic[k] = v
    return new_dic


def dict2numpy(data: dict) -> dict:
    """Return a copy of *data* with every tensor converted to a numpy array."""
    new_dic = {}
    for k, v in data.items():
        if isinstance(v, dict):
            v = dict2numpy(v)
        elif isinstance(v, torch.Tensor):
            v = v.detach().cpu().numpy().copy()
        new_dic[k] = v
    return new_dic


def dict2float(data: dict) -> dict:
    """Return a copy of *data* with every scalar tensor converted to float."""
    new_dic = {}
    for k, v in data.items():
        if isinstance(v, dict):
            v = dict2float(v)
        elif isinstance(v, torch.Tensor):
            v = v.detach().cpu().item()
        new_dic[k] = v
    return new_dic


def metric_with_thresh(depth, label, mask, thresh):
    """Mean absolute error over pixels whose error <= thresh, plus the
    fraction of masked pixels within the threshold."""
    err = torch.abs(depth - label)
    valid = err <= thresh
    mean_abs = torch.mean(err[valid])
    acc = valid.sum(dtype=torch.float) / mask.sum(dtype=torch.float)
    return mean_abs, acc


def evaluate(depth, mask, label, thresh):
    """Average metric_with_thresh over a batch; returns (mean abs err, mean acc)."""
    batch_abs_err = []
    batch_acc = []
    for d, m, l in zip(depth, mask, label):
        abs_err, acc = metric_with_thresh(d, l, m, thresh)
        batch_abs_err.append(abs_err)
        batch_acc.append(acc)
    tot_abs = torch.stack(batch_abs_err)
    tot_acc = torch.stack(batch_acc)
    return tot_abs.mean(), tot_acc.mean()


def save_cameras(cam, path):
    """Write cam[0] (4x4 extrinsic) and cam[1] (3x3 intrinsic) to a text file."""
    # FIX: context manager guarantees the handle is closed even on write errors.
    with open(path, 'w+') as cam_txt:
        cam_txt.write('extrinsic\n')
        for i in range(4):
            for j in range(4):
                cam_txt.write(str(cam[0, i, j]) + ' ')
            cam_txt.write('\n')
        cam_txt.write('\n')
        cam_txt.write('intrinsic\n')
        for i in range(3):
            for j in range(3):
                cam_txt.write(str(cam[1, i, j]) + ' ')
            cam_txt.write('\n')


def read_pfm(filename):
    """Read a PFM image; returns (data, scale) with rows flipped top-down."""
    # FIX: the handle was leaked whenever a parse error raised before close().
    with open(filename, 'rb') as file:
        header = file.readline().decode('utf-8').rstrip()
        if header == 'PF':
            color = True
        elif header == 'Pf':
            color = False
        else:
            raise Exception('Not a PFM file.')
        dim_match = re.match(r'^(\d+)\s(\d+)\s$', file.readline().decode('utf-8'))
        if dim_match:
            width, height = map(int, dim_match.groups())
        else:
            raise Exception('Malformed PFM header.')
        scale = float(file.readline().rstrip())
        if scale < 0:  # negative scale marks little-endian data
            endian = '<'
            scale = -scale
        else:
            endian = '>'  # big-endian
        data = np.fromfile(file, endian + 'f')
    shape = (height, width, 3) if color else (height, width)
    data = np.reshape(data, shape)
    data = np.flipud(data)
    return data, scale


def write_pfm(file, image, scale=1):
    """Write a float32 numpy *image* (H x W, H x W x 1 or H x W x 3) to
    PFM file path *file*."""
    if image.dtype.name != 'float32':
        raise Exception('Image dtype must be float32.')
    image = np.flipud(image)  # PFM stores rows bottom-up
    if len(image.shape) == 3 and image.shape[2] == 3:  # color image
        color = True
    elif len(image.shape) == 2 or len(image.shape) == 3 and image.shape[2] == 1:  # greyscale
        color = False
    else:
        raise Exception('Image must have H x W x 3, H x W x 1 or H x W dimensions.')
    with open(file, 'wb') as out:  # FIX: close the handle even on error
        out.write('PF\n'.encode() if color else 'Pf\n'.encode())
        out.write('%d %d\n'.encode() % (image.shape[1], image.shape[0]))
        endian = image.dtype.byteorder
        if endian == '<' or endian == '=' and sys.byteorder == 'little':
            scale = -scale  # negative scale flags little-endian data
        out.write('%f\n'.encode() % scale)
        # BUG FIX: ndarray.tostring() was removed from NumPy; tobytes() is the
        # drop-in replacement with identical output.
        out.write(image.tobytes())


def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """
    Create a schedule with a learning rate that decreases linearly after
    linearly increasing during a warmup period.
    """
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) /
            float(max(1, num_training_steps - num_warmup_steps))
        )
    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_step_schedule_with_warmup(optimizer, milestones, gamma=0.1, warmup_factor=1.0/3,
                                  warmup_iters=500, last_epoch=-1,):
    """Linear warmup followed by step decay by *gamma* at each milestone."""
    def lr_lambda(current_step):
        if current_step < warmup_iters:
            alpha = float(current_step) / warmup_iters
            current_factor = warmup_factor * (1. - alpha) + alpha
        else:
            current_factor = 1.
        return max(0.0, current_factor * (gamma ** bisect_right(milestones, current_step)))
    return LambdaLR(optimizer, lr_lambda, last_epoch)


def add_summary(data_dict: dict, dtype: str, logger, index: int, flag: str):
    """Log a dict of images or scalars to a TensorBoard-style *logger*
    (only on the main process when running distributed)."""
    def preprocess(name, img):
        if not (len(img.shape) == 3 or len(img.shape) == 4):
            raise NotImplementedError("invalid img shape {}:{} in save_images".format(name, img.shape))
        if len(img.shape) == 3:
            img = img[:, np.newaxis, :, :]
        # BUG FIX: np.bool was removed in NumPy 1.24; the builtin bool is the
        # equivalent dtype comparison.
        if img.dtype == bool:
            img = img.astype(np.float32)
        img = torch.from_numpy(img[:1])
        # NOTE(review): make_grid's 'range' kwarg was renamed 'value_range' in
        # newer torchvision -- confirm against the pinned torchvision version.
        if 'depth' in name or 'label' in name:
            return vutils.make_grid(img, padding=0, nrow=1, normalize=True, scale_each=True, range=(450, 850))
        elif 'mask' in name:
            return vutils.make_grid(img, padding=0, nrow=1, normalize=True, scale_each=True, range=(0, 1))
        elif 'error' in name:
            return vutils.make_grid(img, padding=0, nrow=1, normalize=True, scale_each=True, range=(0, 4))
        return vutils.make_grid(img, padding=0, nrow=1, normalize=True, scale_each=True,)

    on_main = (not is_distributed) or (dist.get_rank() == 0)
    if not on_main:
        return
    if dtype == 'image':
        for k, v in data_dict.items():
            logger.add_image('{}/{}'.format(flag, k), preprocess(k, v), index)
    elif dtype == 'scalar':
        for k, v in data_dict.items():
            logger.add_scalar('{}/{}'.format(flag, k), v, index)
    else:
        raise NotImplementedError


class DictAverageMeter(object):
    """Accumulate per-key float metrics and report their running means."""

    def __init__(self):
        self.data = {}   # key -> running sum
        self.count = 0   # number of update() calls

    def update(self, new_input: dict):
        self.count += 1
        for k, v in new_input.items():
            assert isinstance(v, float), type(v)
            self.data[k] = self.data.get(k, 0) + v

    def mean(self):
        return {k: v / self.count for k, v in self.data.items()}


def reduce_tensors(datas: dict):
    """Average a dict of tensors across ranks; rank 0 gets the mean,
    other ranks get partially-reduced values. No-op when not distributed."""
    if not is_distributed:
        return datas
    world_size = dist.get_world_size()
    with torch.no_grad():
        keys = list(datas.keys())
        vals = []
        for k in keys:
            vals.append(datas[k])
        vals = torch.stack(vals, dim=0)
        # BUG FIX: dist.reduce_op is the removed legacy alias; dist.ReduceOp
        # is the supported enum.
        dist.reduce(vals, op=dist.ReduceOp.SUM, dst=0)
        if dist.get_rank() == 0:
            vals /= float(world_size)
        reduced_datas = {k: v for k, v in zip(keys, vals)}
    return reduced_datas
utils/utils.py
import torch
from torch.optim.lr_scheduler import LambdaLR, _LRScheduler
import torchvision.utils as vutils
import torch.distributed as dist
import errno
import os
import re
import sys
import numpy as np
from bisect import bisect_right

# Distributed mode is inferred from the launcher-provided WORLD_SIZE.
num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
is_distributed = num_gpus > 1


def mkdir_p(path):
    """Create *path* like `mkdir -p`: no error when it already exists."""
    try:
        os.makedirs(path)
    except OSError as exc:
        if exc.errno == errno.EEXIST and os.path.isdir(path):
            pass
        else:
            raise


def dict2cuda(data: dict) -> dict:
    """Return a copy of *data* with every tensor value (recursively) on GPU."""
    new_dic = {}
    for k, v in data.items():
        if isinstance(v, dict):
            v = dict2cuda(v)
        elif isinstance(v, torch.Tensor):
            v = v.cuda()
        new_dic[k] = v
    return new_dic


def dict2numpy(data: dict) -> dict:
    """Return a copy of *data* with every tensor converted to a numpy array."""
    new_dic = {}
    for k, v in data.items():
        if isinstance(v, dict):
            v = dict2numpy(v)
        elif isinstance(v, torch.Tensor):
            v = v.detach().cpu().numpy().copy()
        new_dic[k] = v
    return new_dic


def dict2float(data: dict) -> dict:
    """Return a copy of *data* with every scalar tensor converted to float."""
    new_dic = {}
    for k, v in data.items():
        if isinstance(v, dict):
            v = dict2float(v)
        elif isinstance(v, torch.Tensor):
            v = v.detach().cpu().item()
        new_dic[k] = v
    return new_dic


def metric_with_thresh(depth, label, mask, thresh):
    """Mean absolute error over pixels whose error <= thresh, plus the
    fraction of masked pixels within the threshold."""
    err = torch.abs(depth - label)
    valid = err <= thresh
    mean_abs = torch.mean(err[valid])
    acc = valid.sum(dtype=torch.float) / mask.sum(dtype=torch.float)
    return mean_abs, acc


def evaluate(depth, mask, label, thresh):
    """Average metric_with_thresh over a batch; returns (mean abs err, mean acc)."""
    batch_abs_err = []
    batch_acc = []
    for d, m, l in zip(depth, mask, label):
        abs_err, acc = metric_with_thresh(d, l, m, thresh)
        batch_abs_err.append(abs_err)
        batch_acc.append(acc)
    tot_abs = torch.stack(batch_abs_err)
    tot_acc = torch.stack(batch_acc)
    return tot_abs.mean(), tot_acc.mean()


def save_cameras(cam, path):
    """Write cam[0] (4x4 extrinsic) and cam[1] (3x3 intrinsic) to a text file."""
    # FIX: context manager guarantees the handle is closed even on write errors.
    with open(path, 'w+') as cam_txt:
        cam_txt.write('extrinsic\n')
        for i in range(4):
            for j in range(4):
                cam_txt.write(str(cam[0, i, j]) + ' ')
            cam_txt.write('\n')
        cam_txt.write('\n')
        cam_txt.write('intrinsic\n')
        for i in range(3):
            for j in range(3):
                cam_txt.write(str(cam[1, i, j]) + ' ')
            cam_txt.write('\n')


def read_pfm(filename):
    """Read a PFM image; returns (data, scale) with rows flipped top-down."""
    # FIX: the handle was leaked whenever a parse error raised before close().
    with open(filename, 'rb') as file:
        header = file.readline().decode('utf-8').rstrip()
        if header == 'PF':
            color = True
        elif header == 'Pf':
            color = False
        else:
            raise Exception('Not a PFM file.')
        dim_match = re.match(r'^(\d+)\s(\d+)\s$', file.readline().decode('utf-8'))
        if dim_match:
            width, height = map(int, dim_match.groups())
        else:
            raise Exception('Malformed PFM header.')
        scale = float(file.readline().rstrip())
        if scale < 0:  # negative scale marks little-endian data
            endian = '<'
            scale = -scale
        else:
            endian = '>'  # big-endian
        data = np.fromfile(file, endian + 'f')
    shape = (height, width, 3) if color else (height, width)
    data = np.reshape(data, shape)
    data = np.flipud(data)
    return data, scale


def write_pfm(file, image, scale=1):
    """Write a float32 numpy *image* (H x W, H x W x 1 or H x W x 3) to
    PFM file path *file*."""
    if image.dtype.name != 'float32':
        raise Exception('Image dtype must be float32.')
    image = np.flipud(image)  # PFM stores rows bottom-up
    if len(image.shape) == 3 and image.shape[2] == 3:  # color image
        color = True
    elif len(image.shape) == 2 or len(image.shape) == 3 and image.shape[2] == 1:  # greyscale
        color = False
    else:
        raise Exception('Image must have H x W x 3, H x W x 1 or H x W dimensions.')
    with open(file, 'wb') as out:  # FIX: close the handle even on error
        out.write('PF\n'.encode() if color else 'Pf\n'.encode())
        out.write('%d %d\n'.encode() % (image.shape[1], image.shape[0]))
        endian = image.dtype.byteorder
        if endian == '<' or endian == '=' and sys.byteorder == 'little':
            scale = -scale  # negative scale flags little-endian data
        out.write('%f\n'.encode() % scale)
        # BUG FIX: ndarray.tostring() was removed from NumPy; tobytes() is the
        # drop-in replacement with identical output.
        out.write(image.tobytes())


def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """
    Create a schedule with a learning rate that decreases linearly after
    linearly increasing during a warmup period.
    """
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) /
            float(max(1, num_training_steps - num_warmup_steps))
        )
    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_step_schedule_with_warmup(optimizer, milestones, gamma=0.1, warmup_factor=1.0/3,
                                  warmup_iters=500, last_epoch=-1,):
    """Linear warmup followed by step decay by *gamma* at each milestone."""
    def lr_lambda(current_step):
        if current_step < warmup_iters:
            alpha = float(current_step) / warmup_iters
            current_factor = warmup_factor * (1. - alpha) + alpha
        else:
            current_factor = 1.
        return max(0.0, current_factor * (gamma ** bisect_right(milestones, current_step)))
    return LambdaLR(optimizer, lr_lambda, last_epoch)


def add_summary(data_dict: dict, dtype: str, logger, index: int, flag: str):
    """Log a dict of images or scalars to a TensorBoard-style *logger*
    (only on the main process when running distributed)."""
    def preprocess(name, img):
        if not (len(img.shape) == 3 or len(img.shape) == 4):
            raise NotImplementedError("invalid img shape {}:{} in save_images".format(name, img.shape))
        if len(img.shape) == 3:
            img = img[:, np.newaxis, :, :]
        # BUG FIX: np.bool was removed in NumPy 1.24; the builtin bool is the
        # equivalent dtype comparison.
        if img.dtype == bool:
            img = img.astype(np.float32)
        img = torch.from_numpy(img[:1])
        # NOTE(review): make_grid's 'range' kwarg was renamed 'value_range' in
        # newer torchvision -- confirm against the pinned torchvision version.
        if 'depth' in name or 'label' in name:
            return vutils.make_grid(img, padding=0, nrow=1, normalize=True, scale_each=True, range=(450, 850))
        elif 'mask' in name:
            return vutils.make_grid(img, padding=0, nrow=1, normalize=True, scale_each=True, range=(0, 1))
        elif 'error' in name:
            return vutils.make_grid(img, padding=0, nrow=1, normalize=True, scale_each=True, range=(0, 4))
        return vutils.make_grid(img, padding=0, nrow=1, normalize=True, scale_each=True,)

    on_main = (not is_distributed) or (dist.get_rank() == 0)
    if not on_main:
        return
    if dtype == 'image':
        for k, v in data_dict.items():
            logger.add_image('{}/{}'.format(flag, k), preprocess(k, v), index)
    elif dtype == 'scalar':
        for k, v in data_dict.items():
            logger.add_scalar('{}/{}'.format(flag, k), v, index)
    else:
        raise NotImplementedError


class DictAverageMeter(object):
    """Accumulate per-key float metrics and report their running means."""

    def __init__(self):
        self.data = {}   # key -> running sum
        self.count = 0   # number of update() calls

    def update(self, new_input: dict):
        self.count += 1
        for k, v in new_input.items():
            assert isinstance(v, float), type(v)
            self.data[k] = self.data.get(k, 0) + v

    def mean(self):
        return {k: v / self.count for k, v in self.data.items()}


def reduce_tensors(datas: dict):
    """Average a dict of tensors across ranks; rank 0 gets the mean,
    other ranks get partially-reduced values. No-op when not distributed."""
    if not is_distributed:
        return datas
    world_size = dist.get_world_size()
    with torch.no_grad():
        keys = list(datas.keys())
        vals = []
        for k in keys:
            vals.append(datas[k])
        vals = torch.stack(vals, dim=0)
        # BUG FIX: dist.reduce_op is the removed legacy alias; dist.ReduceOp
        # is the supported enum.
        dist.reduce(vals, op=dist.ReduceOp.SUM, dst=0)
        if dist.get_rank() == 0:
            vals /= float(world_size)
        reduced_datas = {k: v for k, v in zip(keys, vals)}
    return reduced_datas
0.479991
0.403214
# 1.0 import modules # data I/O import sys import os # scientific import numpy as np # Image processing import cv2 as cv # skimage from skimage import io from skimage.morphology import disk, white_tophat from skimage.filters import median, gaussian from skimage.restoration import denoise_wavelet from skimage.exposure import rescale_intensity from skimage.transform import rescale, pyramid_expand import skimage.transform as skt from skimage.feature import blob_log # plotting import matplotlib.pyplot as plt import matplotlib from matplotlib import colors import matplotlib.image as mgimg from matplotlib import animation from mpl_toolkits.axes_grid1.anchored_artists import AnchoredSizeBar matplotlib.rcParams['figure.figsize'] = (10, 9) import matplotlib.font_manager as fm fontprops = fm.FontProperties(size=16, weight='bold') font = {'family' : 'sans-serif', 'weight' : 'light', 'size' : 16} matplotlib.rc('font', **font) matplotlib.rcParams['font.sans-serif'] = ['Helvetica'] # OpenPIV sys.path.append(os.path.abspath("/Users/mackenzie/PythonProjects/openpiv")) sys.path.append(os.path.abspath("/Users/mackenzie/PythonProjects/openpiv/openpiv")) from openpiv import * from windef import Settings import openpiv.piv from openpiv import windef from openpiv.windef import Settings from openpiv import tools, scaling, validation, filters, preprocess from openpiv.pyprocess import extended_search_area_piv, get_field_shape, get_coordinates from openpiv import smoothn from openpiv.preprocess import mask_coordinates testset = Settings() # Curlypiv from curlypiv.CurlypivTestCollection import CurlypivTestCollection from curlypiv.CurlypivTestSetup import CurlypivTestSetup from curlypiv.CurlypivPIV import CurlypivPIV from curlypiv.CurlypivPIVSetup import CurlypivPIVSetup from curlypiv.CurlypivFile import CurlypivFile from curlypiv.CurlypivImageProcessing import img_resize, img_subtract_background, img_filter # ------------------------- test CurlypivPIV below 
------------------------------------ # inputs nameTestCol = 'testCol' nameTestSetup = 'testSet' namePIVSetup = 'testPIV' base_path = '/Users/mackenzie/Desktop/03.18.21-ZuPIV_test' test_dir = 'tests' test_level = 'seq' # ['all','loc','test','run','seq','file'] img_type = '.tif' loc = 1 test = 2.5 testid = ('E','Vmm') run = 3 runid = ('run', 'num') seq = 1 seqid = ('test_', '_X') frameid = '_X' # processing inputs scale = 2 bg_method = 'KNN' if bg_method == 'KNN': backSub = cv.createBackgroundSubtractorKNN(detectShadows=False) testCol = CurlypivTestCollection(nameTestCol, base_path, file_type=img_type, dir_tests=test_dir, testid=testid, runid=runid, seqid=seqid, frameid=frameid) testSet = CurlypivTestSetup(name=nameTestSetup) pivSet = CurlypivPIVSetup(name=namePIVSetup, save_text=False, save_plot=True, vectors_on_image=True, testCollection=testCol,testSetup=testSet ) # instantiate PIV class object. piv = CurlypivPIV(testCollection=testCol, testSetup=testSet, pivSetup=pivSet) # get appropriate metrics level imgs = piv.get_analysis_level(level=test_level, loc=loc, test=test, run=run, seq=seq) # ----- TEST IMAGE PROCESSING METHODS ON SEVERAL CURLYPIV.IMAGES IN A SEQUENCE ----- img_baseline = imgs.get_sublevel(key='test_1_X1.tif') img1 = imgs.get_sublevel_all() cropping = { 'xmin': 100, # x = 0 is the left of the image 'xmax': 356, 'ymin': 300, 'ymax': 428 # y = 0 is the bottom of the image } processing = { # 'none': {'none'}, # 'median': {'args': [disk(5)]}, # 'gaussian': {'args': [3]}, # 'white_tophat': {'args': [disk(5)]}, # returns bright spots smaller than the structuring element. 
'denoise_wavelet': {'args': [], 'kwargs': dict(method='BayesShrink', mode='soft', rescale_sigma=True)}, 'rescale_intensity': {'args': [(4, 99.995), ('dtype')]} } y = range(len(img1)) pth = '/Users/mackenzie/Desktop/03.18.21-ZuPIV_test/tests/loc1/E2.5Vmm/run3num/piv' for i in range(len(img1)-1): print(i) if i == 0: # crop #img1[i].image_crop(cropspecs=cropping) # resize #img1[i].image_resize(method='pyramid_expand', scale=scale) # filter img1[i].image_filter(filterspecs=processing, image_input='raw', image_output='filtered', force_rawdtype=True) # subtract background img1[i].image_subtract_background(image_input='filtered', backgroundSubtractor=backSub) else: i = i - 1 # crop #img1[i+1].image_crop(cropspecs=cropping) # resize #img1[i+1].image_resize(method='pyramid_expand', scale=scale) # filter img1[i+1].image_filter(filterspecs=processing, image_input='raw', image_output='filtered', force_rawdtype=True) # subtract background img1[i+1].image_subtract_background(image_input='filtered', backgroundSubtractor=backSub) if i > 7: piv_pass = 0 # 3.1.4 - Start First Pass PIV x, y, u, v, s2n = windef.first_pass( img1[i].masked, img1[i + 1].masked, pivSet.settings ) if np.isnan(u[0][0]) is True: print("PIV First-Pass gives no results: (u,v) = Nan") raise KeyboardInterrupt mask_coords = [] u = np.ma.masked_array(u, mask=np.ma.nomask) v = np.ma.masked_array(v, mask=np.ma.nomask) # 3.2.0 - Start Multi Pass PIV piv_pass += 1 # 3.2.0 - Run multi pass windows deformation loop for current_iteration in range(0, pivSet.settings.num_iterations): x, y, u, v, s2n, mask = windef.multipass_img_deform( img1[i].masked, img1[i + 1].masked, current_iteration, x, y, u, v, pivSet.settings, mask_coords=mask_coords ) # If the smoothing is active, we do it at each pass # but not the last one if pivSet.settings.smoothn is True and current_iteration < pivSet.settings.num_iterations - 1: u, dummy_u1, dummy_u2, dummy_u3 = smoothn.smoothn( u, s=pivSet.settings.smoothn_p ) v, dummy_v1, dummy_v2, 
dummy_v3 = smoothn.smoothn( v, s=pivSet.settings.smoothn_p ) # 3.2.2 - Adjust scaling u = u / pivSet.settings.dt v = v / pivSet.settings.dt x, y, u, v = scaling.uniform(x, y, u, v, scaling_factor=pivSet.settings.scaling_factor) if np.isnan(u[0][0]) == True: print("PIV Multi-Pass gives no results: (u,v) = Nan") raise KeyboardInterrupt # Save vector field if pivSet.settings.save_plot is True or pivSet.settings.show_plot is True: # ------------ INITIALIZE PLOTTING DETAILS ------------ # initialize plot fig, ax = plt.subplots(figsize=(10, 9)) ax.set_title("Frame: " + str(img1[i].name)) ax.tick_params( axis='both', # changes apply to the x-axis which='both', # both major and minor ticks are affected bottom=False, # ticks along the bottom edge are off top=False, # ticks along the top edge are off left=False, right=False, labelleft=False, labelbottom=False, # labels along the bottom edge are off reset=True) scalebar = AnchoredSizeBar(ax.transData, pivSet.scalebar_microns * pivSet.pix_per_um, str(pivSet.scalebar_microns) + r'$\mu$m', 'lower right', pad=0.1, color='white', frameon=False, size_vertical=5, fontproperties=fontprops) ax.add_artist(scalebar) if pivSet.vectors_on_image is True: img_plot = img1[i].filtered.copy() M = np.hypot(u, v) # ------------ ENHANCE IMAGE CONTRAST FOR VISUALIZATION ------------ if pivSet.settings.windowsizes[-1] < 30: # 4 passes with a grid of 8 quiver_scale = 5 # bigger number = smaller arrows else: # 3 passes with a grid of 16 quiver_scale = 5 # print(quiver_scale) """ # brighten brigth areas and darken dark areas #img_plot = np.where(img_plot>np.mean(img_plot)*np.std(img_plot)*0.1,img_plot*5,img_plot*0.8) # set max and min intensities img_plot = np.where(img_plot<vmax,img_plot,vmax) # clip upper percentile img_plot = np.where(img_plot>vmin,img_plot,vmin) # clip lower percentile # gaussian blur img_plot = gaussian(img_plot, sigma=0.5) """ # match the histogram to the reference image (first image in stack) #img_plot = 
match_histograms(img_plot, img_reference) # resize with a bi-quartic interpolation img_plot = skt.resize(img_plot, (int(np.round(np.shape(img_plot)[0] / pivSet.pix_per_um, 0)), int(np.round(np.shape(img_plot)[1] / pivSet.pix_per_um, 0))), order=2, preserve_range=True) # recast as uint16 img_plot = np.rint(img_plot) img_plot = img_plot.astype(np.uint16) # check max/min pixels values to make sure nothing is getting funky # print("Max pixel value: " + str(np.max(img_plot))) # print("Min pixel value: " + str(np.min(img_plot))) # plot the image ax.imshow(img_plot, cmap='gray') # ax.imshow(img_mask_a) # plot the vector field width = 1 minlength = 0.1 headwidth = width * 5 headlength = headwidth * 3 headaxislength = headlength / 2 Q = ax.quiver(x, y, u, v, [M], pivot='mid', angles='xy', scale_units='xy', scale=quiver_scale, #width=width * 1e-2, #headwidth=headwidth, headlength=headlength, headaxislength=headaxislength, #minlength=minlength, cmap=pivSet.colorMap, alpha=pivSet.alpha, norm=pivSet.colorNorm) cbar = fig.colorbar(Q, extend='max', fraction=0.1, shrink=0.5) cbar.set_label(label=r'$\frac{\mu m}{s}$', size=16) cbar.ax.tick_params(labelsize=14) #plt.show() savepath = pth + '/' + str(i) + '.png' plt.savefig(fname=savepath,)
tests/test_CurlypivPIV_onImages.py
# 1.0 import modules # data I/O import sys import os # scientific import numpy as np # Image processing import cv2 as cv # skimage from skimage import io from skimage.morphology import disk, white_tophat from skimage.filters import median, gaussian from skimage.restoration import denoise_wavelet from skimage.exposure import rescale_intensity from skimage.transform import rescale, pyramid_expand import skimage.transform as skt from skimage.feature import blob_log # plotting import matplotlib.pyplot as plt import matplotlib from matplotlib import colors import matplotlib.image as mgimg from matplotlib import animation from mpl_toolkits.axes_grid1.anchored_artists import AnchoredSizeBar matplotlib.rcParams['figure.figsize'] = (10, 9) import matplotlib.font_manager as fm fontprops = fm.FontProperties(size=16, weight='bold') font = {'family' : 'sans-serif', 'weight' : 'light', 'size' : 16} matplotlib.rc('font', **font) matplotlib.rcParams['font.sans-serif'] = ['Helvetica'] # OpenPIV sys.path.append(os.path.abspath("/Users/mackenzie/PythonProjects/openpiv")) sys.path.append(os.path.abspath("/Users/mackenzie/PythonProjects/openpiv/openpiv")) from openpiv import * from windef import Settings import openpiv.piv from openpiv import windef from openpiv.windef import Settings from openpiv import tools, scaling, validation, filters, preprocess from openpiv.pyprocess import extended_search_area_piv, get_field_shape, get_coordinates from openpiv import smoothn from openpiv.preprocess import mask_coordinates testset = Settings() # Curlypiv from curlypiv.CurlypivTestCollection import CurlypivTestCollection from curlypiv.CurlypivTestSetup import CurlypivTestSetup from curlypiv.CurlypivPIV import CurlypivPIV from curlypiv.CurlypivPIVSetup import CurlypivPIVSetup from curlypiv.CurlypivFile import CurlypivFile from curlypiv.CurlypivImageProcessing import img_resize, img_subtract_background, img_filter # ------------------------- test CurlypivPIV below 
------------------------------------ # inputs nameTestCol = 'testCol' nameTestSetup = 'testSet' namePIVSetup = 'testPIV' base_path = '/Users/mackenzie/Desktop/03.18.21-ZuPIV_test' test_dir = 'tests' test_level = 'seq' # ['all','loc','test','run','seq','file'] img_type = '.tif' loc = 1 test = 2.5 testid = ('E','Vmm') run = 3 runid = ('run', 'num') seq = 1 seqid = ('test_', '_X') frameid = '_X' # processing inputs scale = 2 bg_method = 'KNN' if bg_method == 'KNN': backSub = cv.createBackgroundSubtractorKNN(detectShadows=False) testCol = CurlypivTestCollection(nameTestCol, base_path, file_type=img_type, dir_tests=test_dir, testid=testid, runid=runid, seqid=seqid, frameid=frameid) testSet = CurlypivTestSetup(name=nameTestSetup) pivSet = CurlypivPIVSetup(name=namePIVSetup, save_text=False, save_plot=True, vectors_on_image=True, testCollection=testCol,testSetup=testSet ) # instantiate PIV class object. piv = CurlypivPIV(testCollection=testCol, testSetup=testSet, pivSetup=pivSet) # get appropriate metrics level imgs = piv.get_analysis_level(level=test_level, loc=loc, test=test, run=run, seq=seq) # ----- TEST IMAGE PROCESSING METHODS ON SEVERAL CURLYPIV.IMAGES IN A SEQUENCE ----- img_baseline = imgs.get_sublevel(key='test_1_X1.tif') img1 = imgs.get_sublevel_all() cropping = { 'xmin': 100, # x = 0 is the left of the image 'xmax': 356, 'ymin': 300, 'ymax': 428 # y = 0 is the bottom of the image } processing = { # 'none': {'none'}, # 'median': {'args': [disk(5)]}, # 'gaussian': {'args': [3]}, # 'white_tophat': {'args': [disk(5)]}, # returns bright spots smaller than the structuring element. 
'denoise_wavelet': {'args': [], 'kwargs': dict(method='BayesShrink', mode='soft', rescale_sigma=True)}, 'rescale_intensity': {'args': [(4, 99.995), ('dtype')]} } y = range(len(img1)) pth = '/Users/mackenzie/Desktop/03.18.21-ZuPIV_test/tests/loc1/E2.5Vmm/run3num/piv' for i in range(len(img1)-1): print(i) if i == 0: # crop #img1[i].image_crop(cropspecs=cropping) # resize #img1[i].image_resize(method='pyramid_expand', scale=scale) # filter img1[i].image_filter(filterspecs=processing, image_input='raw', image_output='filtered', force_rawdtype=True) # subtract background img1[i].image_subtract_background(image_input='filtered', backgroundSubtractor=backSub) else: i = i - 1 # crop #img1[i+1].image_crop(cropspecs=cropping) # resize #img1[i+1].image_resize(method='pyramid_expand', scale=scale) # filter img1[i+1].image_filter(filterspecs=processing, image_input='raw', image_output='filtered', force_rawdtype=True) # subtract background img1[i+1].image_subtract_background(image_input='filtered', backgroundSubtractor=backSub) if i > 7: piv_pass = 0 # 3.1.4 - Start First Pass PIV x, y, u, v, s2n = windef.first_pass( img1[i].masked, img1[i + 1].masked, pivSet.settings ) if np.isnan(u[0][0]) is True: print("PIV First-Pass gives no results: (u,v) = Nan") raise KeyboardInterrupt mask_coords = [] u = np.ma.masked_array(u, mask=np.ma.nomask) v = np.ma.masked_array(v, mask=np.ma.nomask) # 3.2.0 - Start Multi Pass PIV piv_pass += 1 # 3.2.0 - Run multi pass windows deformation loop for current_iteration in range(0, pivSet.settings.num_iterations): x, y, u, v, s2n, mask = windef.multipass_img_deform( img1[i].masked, img1[i + 1].masked, current_iteration, x, y, u, v, pivSet.settings, mask_coords=mask_coords ) # If the smoothing is active, we do it at each pass # but not the last one if pivSet.settings.smoothn is True and current_iteration < pivSet.settings.num_iterations - 1: u, dummy_u1, dummy_u2, dummy_u3 = smoothn.smoothn( u, s=pivSet.settings.smoothn_p ) v, dummy_v1, dummy_v2, 
dummy_v3 = smoothn.smoothn( v, s=pivSet.settings.smoothn_p ) # 3.2.2 - Adjust scaling u = u / pivSet.settings.dt v = v / pivSet.settings.dt x, y, u, v = scaling.uniform(x, y, u, v, scaling_factor=pivSet.settings.scaling_factor) if np.isnan(u[0][0]) == True: print("PIV Multi-Pass gives no results: (u,v) = Nan") raise KeyboardInterrupt # Save vector field if pivSet.settings.save_plot is True or pivSet.settings.show_plot is True: # ------------ INITIALIZE PLOTTING DETAILS ------------ # initialize plot fig, ax = plt.subplots(figsize=(10, 9)) ax.set_title("Frame: " + str(img1[i].name)) ax.tick_params( axis='both', # changes apply to the x-axis which='both', # both major and minor ticks are affected bottom=False, # ticks along the bottom edge are off top=False, # ticks along the top edge are off left=False, right=False, labelleft=False, labelbottom=False, # labels along the bottom edge are off reset=True) scalebar = AnchoredSizeBar(ax.transData, pivSet.scalebar_microns * pivSet.pix_per_um, str(pivSet.scalebar_microns) + r'$\mu$m', 'lower right', pad=0.1, color='white', frameon=False, size_vertical=5, fontproperties=fontprops) ax.add_artist(scalebar) if pivSet.vectors_on_image is True: img_plot = img1[i].filtered.copy() M = np.hypot(u, v) # ------------ ENHANCE IMAGE CONTRAST FOR VISUALIZATION ------------ if pivSet.settings.windowsizes[-1] < 30: # 4 passes with a grid of 8 quiver_scale = 5 # bigger number = smaller arrows else: # 3 passes with a grid of 16 quiver_scale = 5 # print(quiver_scale) """ # brighten brigth areas and darken dark areas #img_plot = np.where(img_plot>np.mean(img_plot)*np.std(img_plot)*0.1,img_plot*5,img_plot*0.8) # set max and min intensities img_plot = np.where(img_plot<vmax,img_plot,vmax) # clip upper percentile img_plot = np.where(img_plot>vmin,img_plot,vmin) # clip lower percentile # gaussian blur img_plot = gaussian(img_plot, sigma=0.5) """ # match the histogram to the reference image (first image in stack) #img_plot = 
match_histograms(img_plot, img_reference) # resize with a bi-quartic interpolation img_plot = skt.resize(img_plot, (int(np.round(np.shape(img_plot)[0] / pivSet.pix_per_um, 0)), int(np.round(np.shape(img_plot)[1] / pivSet.pix_per_um, 0))), order=2, preserve_range=True) # recast as uint16 img_plot = np.rint(img_plot) img_plot = img_plot.astype(np.uint16) # check max/min pixels values to make sure nothing is getting funky # print("Max pixel value: " + str(np.max(img_plot))) # print("Min pixel value: " + str(np.min(img_plot))) # plot the image ax.imshow(img_plot, cmap='gray') # ax.imshow(img_mask_a) # plot the vector field width = 1 minlength = 0.1 headwidth = width * 5 headlength = headwidth * 3 headaxislength = headlength / 2 Q = ax.quiver(x, y, u, v, [M], pivot='mid', angles='xy', scale_units='xy', scale=quiver_scale, #width=width * 1e-2, #headwidth=headwidth, headlength=headlength, headaxislength=headaxislength, #minlength=minlength, cmap=pivSet.colorMap, alpha=pivSet.alpha, norm=pivSet.colorNorm) cbar = fig.colorbar(Q, extend='max', fraction=0.1, shrink=0.5) cbar.set_label(label=r'$\frac{\mu m}{s}$', size=16) cbar.ax.tick_params(labelsize=14) #plt.show() savepath = pth + '/' + str(i) + '.png' plt.savefig(fname=savepath,)
0.226527
0.300816
from collections import OrderedDict from typing import Dict, List, Optional, Set import casbin from graphql.execution.base import ResolveInfo from graphql.language.ast import Field, FragmentDefinition, FragmentSpread from graphql.type.definition import GraphQLList, GraphQLNonNull from graphql.type.schema import GraphQLSchema from database import Protocol, ProtocolVersion, Run, RunVersion, Sample, SampleVersion, filter_by_plate_label_filter, filter_by_reagent_label_filter, filter_by_sample_label_filter from fastapi import Request from models import ProtocolModel, RunModel, SampleResult from server import Auth0CurrentUserPatched, Session from api.utils import paginatify from authorization import check_access # CRUD helpers ---------------------------------------------------------------- def get_session(info: ResolveInfo) -> Session: return getattr(info.context['request'].state, 'session', None) def get_current_user_from_request(request: Request) -> Auth0CurrentUserPatched: return getattr(request.state, 'user', None) def get_enforcer_from_request(request: Request) -> casbin.Enforcer: return getattr(request.state, 'enforcer', None) def change_case(orig: str) -> str: return ''\ .join(['_' + i.lower() if i.isupper() else i for i in orig])\ .lstrip('_') def graphql_ast_get_subfield(schema, sub_field: str): return schema.fields.get(sub_field).type def graphql_ast_flatten_field(field: Field, fragments: Dict[str, FragmentDefinition], schema, root_schema: GraphQLSchema) -> List[str]: results = [] # is_list = isinstance(schema, GraphQLList) list_count = 0 while True: schema_changed = False if isinstance(schema, GraphQLNonNull): schema = schema.of_type schema_changed = True if isinstance(schema, GraphQLList): list_count += 1 schema = schema.of_type schema_changed = True if not schema_changed: break if getattr(field, 'selection_set', None) is not None: for sub_field in field.selection_set.selections: if isinstance(sub_field, FragmentSpread): sub_field = 
fragments[sub_field.name.value] sub_field_schema = schema else: sub_field_schema = graphql_ast_get_subfield(schema, sub_field.name.value) if type(sub_field_schema) == str: sub_field_schema = root_schema.get_type(sub_field_schema) sub_field_results = graphql_ast_flatten_field(sub_field, fragments, sub_field_schema, root_schema) if isinstance(field, FragmentDefinition): results += sub_field_results else: for result in sub_field_results: results.append(f"{change_case(field.name.value)}{'[*]' * list_count}.{result}") else: results.append(change_case(field.name.value)) return results def graphql_ast_get_sub_fields(field: Field, fragments: Dict[str, FragmentDefinition]) -> Set[str]: sub_fields = set() if getattr(field, 'selection_set', None) is not None: for sub_field in field.selection_set.selections: if isinstance(sub_field, FragmentSpread): sub_fields.update(graphql_ast_get_sub_fields(fragments[sub_field.name.value], fragments)) else: sub_fields.add(sub_field.name.value) return sub_fields def graphql_ast_schema_fields(schema) -> Set[str]: return schema.fields.keys() def add_sample_id(sample_dict: dict) -> dict: sample_dict['id'] = f"{sample_dict.get('run_version_id', '')}-{sample_dict.get('protocol_version_id', '')}-{sample_dict.get('sample_id', '')}-{sample_dict.get('plate_id', '')}" return sample_dict # CRUD methods ---------------------------------------------------------------- def graphql_crud_get_runs( enforcer: casbin.Enforcer, current_user: Auth0CurrentUserPatched, info: ResolveInfo, # Search parameters protocol: Optional[int] = None, run: Optional[int] = None, plate: Optional[str] = None, reagent: Optional[str] = None, sample: Optional[str] = None, creator: Optional[str] = None, archived: Optional[bool] = None, # Paging parameters page: Optional[int] = None, per_page: Optional[int] = None, ): # Calculate which top level fields to remove. 
top_level_ignore = {'id', 'run_id', 'created_by', 'created_on', 'updated_by', 'updated_on', 'protocol'} # Flatten `info` parameter into jsonb_query_path statements. select_args = [] top_level = set() for result in graphql_ast_flatten_field(info.field_asts[0], info.fragments, info.return_type, info.schema): result_parts = result.split('.') if len(result_parts) > 3 and result_parts[3] not in top_level_ignore: top_level.add(result_parts[3]) jsonb_fields = [ 'id', 'run_id', 'created_by', 'created_on', 'updated_by', 'updated_on', ] select_args = [ Run.id.label('id'), Run.id.label('run_id'), Run.created_by.label('created_by'), Run.created_on.label('created_on'), RunVersion.updated_by.label('updated_by'), RunVersion.updated_on.label('updated_on'), ] for field in top_level: # func.jsonb_path_query(RunVersion.data, f"$.{field}").label(field) jsonb_fields.append(field) select_args.append(RunVersion.data[field].label(field)) db = get_session(info) # Join with additional tables as necessary for search params. from_tables = OrderedDict() filters = [] if protocol: from_tables[ProtocolVersion] = ProtocolVersion.id == Run.protocol_version_id filters.append(ProtocolVersion.protocol_id == protocol) if run: filters.append(Run.id == run) if plate: filters.append(filter_by_plate_label_filter(plate)) if reagent: filters.append(filter_by_reagent_label_filter(reagent)) if sample: filters.append(filter_by_sample_label_filter(sample)) if creator: filters.append(Run.created_by == creator) if archived is None or archived == False: filters.append(Run.is_deleted == False) query = db.query(*select_args)\ .select_from(Run)\ .join(RunVersion, RunVersion.id == Run.version_id) for join_cls, join_filter in from_tables.items(): query = query.join(join_cls, join_filter) # Apply search filters. 
for search_filter in filters: query = query.filter(search_filter) # Get results query = query.distinct().order_by(Run.created_on.desc()) rows = [ run for run in query if check_access(enforcer, user=current_user.username, path=f"/run/{str(run.id)}", method="GET") ] return paginatify( items_label='runs', items=rows, item_to_dict=lambda run: RunModel.parse_obj(run._asdict()), page=page, per_page=per_page, ) def graphql_crud_get_protocols( enforcer: casbin.Enforcer, current_user: Auth0CurrentUserPatched, info: ResolveInfo, # Search parameters protocol: Optional[int] = None, run: Optional[int] = None, plate: Optional[str] = None, reagent: Optional[str] = None, sample: Optional[str] = None, creator: Optional[str] = None, archived: Optional[bool] = None, # Paging parameters page: Optional[int] = None, per_page: Optional[int] = None, ): # Calculate which top level fields to remove. top_level_ignore = {'id', 'protocol_id', 'created_by', 'created_on', 'updated_by', 'updated_on', 'protocol'} # Flatten `info` parameter into jsonb_query_path statements. select_args = [] top_level = set() for result in graphql_ast_flatten_field(info.field_asts[0], info.fragments, info.return_type, info.schema): result_parts = result.split('.') if len(result_parts) > 3 and result_parts[3] not in top_level_ignore: top_level.add(result_parts[3]) jsonb_fields = [ 'id', 'protocol_id', 'created_by', 'created_on', 'updated_by', 'updated_on', ] select_args = [ Protocol.id.label('id'), Protocol.id.label('protocol_id'), Protocol.created_by.label('created_by'), Protocol.created_on.label('created_on'), ProtocolVersion.updated_by.label('updated_by'), ProtocolVersion.updated_on.label('updated_on'), ] for field in top_level: jsonb_fields.append(field) select_args.append(ProtocolVersion.data[field].label(field)) db = get_session(info) # Join with additional tables as necessary for search params. 
from_tables = OrderedDict() filters = [] if protocol: filters.append(Protocol.id == protocol) if run: from_tables[Run] = Run.protocol_version_id == ProtocolVersion.id filters.append(Run.id == run) if plate: from_tables[Run] = Run.protocol_version_id == ProtocolVersion.id from_tables[RunVersion] = RunVersion.id == Run.version_id filters.append(filter_by_plate_label_filter(plate)) if reagent: from_tables[Run] = Run.protocol_version_id == ProtocolVersion.id from_tables[RunVersion] = RunVersion.id == Run.version_id filters.append(filter_by_reagent_label_filter(reagent)) if sample: from_tables[Run] = Run.protocol_version_id == ProtocolVersion.id from_tables[RunVersion] = RunVersion.id == Run.version_id filters.append(filter_by_sample_label_filter(sample)) if creator: filters.append(Protocol.created_by == creator) if archived is None or archived == False: filters.append(Protocol.is_deleted == False) query = db.query(*select_args)\ .select_from(Protocol)\ .join(ProtocolVersion, ProtocolVersion.id == Protocol.version_id) for join_cls, join_filter in from_tables.items(): query = query.join(join_cls, join_filter) # Apply search filters. 
for search_filter in filters: query = query.filter(search_filter) # Get results query = query.distinct().order_by(Protocol.created_on.desc()) rows = [ protocol for protocol in query if check_access(enforcer, user=current_user.username, path=f"/protocol/{str(protocol.id)}", method="GET") ] return paginatify( items_label='protocols', items=rows, item_to_dict=lambda protocol: ProtocolModel.parse_obj(protocol._asdict()), page=page, per_page=per_page, ) def graphql_crud_get_samples( enforcer: casbin.Enforcer, current_user: Auth0CurrentUserPatched, info: ResolveInfo, # Search parameters protocol: Optional[int] = None, run: Optional[int] = None, plate: Optional[str] = None, reagent: Optional[str] = None, sample: Optional[str] = None, creator: Optional[str] = None, archived: Optional[bool] = None, # Paging parameters page: Optional[int] = None, per_page: Optional[int] = None, ): # Calculate which top level fields to remove. top_level_ignore = {'sample_id', 'plate_id', 'run_version_id', 'protocol_version_id', 'created_by', 'created_on', 'updated_by', 'updated_on', 'run_id', 'protocol_id'} # Flatten `info` parameter into jsonb_query_path statements. 
select_args = [] top_level = set() for result in graphql_ast_flatten_field(info.field_asts[0], info.fragments, info.return_type, info.schema): result_parts = result.split('.') if len(result_parts) > 3 and result_parts[3] not in top_level_ignore: top_level.add(result_parts[3]) jsonb_fields = [ 'sample_id', 'sampleID', 'plate_id', 'plateID', 'run_version_id', 'protocol_version_id', 'created_by', 'created_on', 'updated_by', 'updated_on', 'run_id', 'runID', 'protocol_id', 'protocolID', ] select_args = [ Sample.sample_id.label('sample_id'), Sample.sample_id.label('sampleID'), Sample.plate_id.label('plate_id'), Sample.plate_id.label('plateID'), Sample.run_version_id.label('run_version_id'), Sample.protocol_version_id.label('protocol_version_id'), Sample.created_by.label('created_by'), Sample.created_on.label('created_on'), SampleVersion.updated_by.label('updated_by'), SampleVersion.updated_on.label('updated_on'), RunVersion.run_id.label('run_id'), RunVersion.run_id.label('runID'), ProtocolVersion.protocol_id.label('protocol_id'), ProtocolVersion.protocol_id.label('protocolID'), ] for field in top_level: jsonb_fields.append(field) select_args.append(SampleVersion.data[field].label(field)) db = get_session(info) # Join with additional tables as necessary for search params. 
from_tables = OrderedDict() filters = [] if protocol: filters.append(ProtocolVersion.protocol_id == protocol) if run: filters.append(RunVersion.run_id == run) if plate: filters.append(Sample.plate_id.like(f"%{plate}%")) if reagent: filters.append(filter_by_reagent_label_filter(reagent)) if sample: filters.append(Sample.sample_id.like(f"%{sample}%")) if creator: filters.append(Sample.created_by == creator) if archived is None or archived == False: filters.append(Sample.is_deleted == False) query = db.query(*select_args)\ .select_from(Sample)\ .join(SampleVersion, SampleVersion.id == Sample.version_id)\ .join(RunVersion, RunVersion.id == Sample.run_version_id)\ .join(ProtocolVersion, ProtocolVersion.id == Sample.protocol_version_id) for join_cls, join_filter in from_tables.items(): query = query.join(join_cls, join_filter) # Apply search filters. for search_filter in filters: query = query.filter(search_filter) # Get results query = query.distinct().order_by(Sample.created_on.desc()) rows = [ sample for sample in query if check_access(enforcer, user=current_user.username, path=f"/run/{str(sample.run_id)}", method="GET") ] return paginatify( items_label='samples', items=rows, item_to_dict=lambda sample: SampleResult.parse_obj(add_sample_id(sample._asdict())), page=page, per_page=per_page, )
server/api_graphql/crud.py
from collections import OrderedDict
from typing import Dict, List, Optional, Set
import casbin
from graphql.execution.base import ResolveInfo
from graphql.language.ast import Field, FragmentDefinition, FragmentSpread
from graphql.type.definition import GraphQLList, GraphQLNonNull
from graphql.type.schema import GraphQLSchema
from database import Protocol, ProtocolVersion, Run, RunVersion, Sample, SampleVersion, filter_by_plate_label_filter, filter_by_reagent_label_filter, filter_by_sample_label_filter
from fastapi import Request
from models import ProtocolModel, RunModel, SampleResult
from server import Auth0CurrentUserPatched, Session
from api.utils import paginatify
from authorization import check_access


# CRUD helpers ----------------------------------------------------------------

def get_session(info: ResolveInfo) -> Session:
    """Return the SQLAlchemy session stashed on the request state, or None."""
    return getattr(info.context['request'].state, 'session', None)


def get_current_user_from_request(request: Request) -> Auth0CurrentUserPatched:
    """Return the authenticated user stashed on the request state, or None."""
    return getattr(request.state, 'user', None)


def get_enforcer_from_request(request: Request) -> casbin.Enforcer:
    """Return the casbin enforcer stashed on the request state, or None."""
    return getattr(request.state, 'enforcer', None)


def change_case(orig: str) -> str:
    """Convert a camelCase identifier to snake_case.

    Each uppercase letter is lowered and prefixed with '_'; a leading
    underscore (from an initial capital) is stripped.
    """
    return ''\
        .join(['_' + i.lower() if i.isupper() else i for i in orig])\
        .lstrip('_')


def graphql_ast_get_subfield(schema, sub_field: str):
    """Return the GraphQL type of `sub_field` within `schema`.

    NOTE(review): raises AttributeError if `sub_field` is not in
    `schema.fields` (`.get()` returns None, then `.type` fails).
    """
    return schema.fields.get(sub_field).type


def graphql_ast_flatten_field(field: Field, fragments: Dict[str, FragmentDefinition], schema, root_schema: GraphQLSchema) -> List[str]:
    """Recursively flatten a GraphQL selection into dotted path strings.

    Each returned string is a snake_cased path such as
    ``parent[*].child.leaf`` where ``[*]`` marks each list wrapper
    encountered on the field's type.

    Args:
        field: The AST field (or fragment definition) to flatten.
        fragments: Named fragment definitions, for resolving spreads.
        schema: The GraphQL type of `field`.
        root_schema: The full schema, used to resolve type names given
            as strings.

    Returns:
        A list of dotted path strings, one per leaf selection.
    """
    results = []
    # is_list = isinstance(schema, GraphQLList)
    list_count = 0
    # Unwrap NonNull/List wrappers; count the list nestings so the path
    # can carry one '[*]' per list level.
    while True:
        schema_changed = False
        if isinstance(schema, GraphQLNonNull):
            schema = schema.of_type
            schema_changed = True
        if isinstance(schema, GraphQLList):
            list_count += 1
            schema = schema.of_type
            schema_changed = True
        if not schema_changed:
            break
    if getattr(field, 'selection_set', None) is not None:
        for sub_field in field.selection_set.selections:
            if isinstance(sub_field, FragmentSpread):
                # Replace the spread with its fragment definition; the
                # fragment's selections are typed against the same schema.
                sub_field = fragments[sub_field.name.value]
                sub_field_schema = schema
            else:
                sub_field_schema = graphql_ast_get_subfield(schema, sub_field.name.value)
                # Some schema entries hold the type name as a string;
                # resolve it against the root schema.
                if type(sub_field_schema) == str:
                    sub_field_schema = root_schema.get_type(sub_field_schema)
            sub_field_results = graphql_ast_flatten_field(sub_field, fragments, sub_field_schema, root_schema)
            if isinstance(field, FragmentDefinition):
                # Fragments are transparent: do not add a path segment.
                results += sub_field_results
            else:
                for result in sub_field_results:
                    results.append(f"{change_case(field.name.value)}{'[*]' * list_count}.{result}")
    else:
        # Leaf field: the path is just this field's snake_cased name.
        results.append(change_case(field.name.value))
    return results


def graphql_ast_get_sub_fields(field: Field, fragments: Dict[str, FragmentDefinition]) -> Set[str]:
    """Return the set of immediate sub-field names selected under `field`,
    expanding fragment spreads one level via recursion."""
    sub_fields = set()
    if getattr(field, 'selection_set', None) is not None:
        for sub_field in field.selection_set.selections:
            if isinstance(sub_field, FragmentSpread):
                sub_fields.update(graphql_ast_get_sub_fields(fragments[sub_field.name.value], fragments))
            else:
                sub_fields.add(sub_field.name.value)
    return sub_fields


def graphql_ast_schema_fields(schema) -> Set[str]:
    """Return the field names declared on a GraphQL object type."""
    return schema.fields.keys()


def add_sample_id(sample_dict: dict) -> dict:
    """Mutate `sample_dict` in place, adding a composite 'id' key built
    from run version, protocol version, sample and plate ids; returns the
    same dict."""
    sample_dict['id'] = f"{sample_dict.get('run_version_id', '')}-{sample_dict.get('protocol_version_id', '')}-{sample_dict.get('sample_id', '')}-{sample_dict.get('plate_id', '')}"
    return sample_dict


# CRUD methods ----------------------------------------------------------------

def graphql_crud_get_runs(
    enforcer: casbin.Enforcer,
    current_user: Auth0CurrentUserPatched,
    info: ResolveInfo,
    # Search parameters
    protocol: Optional[int] = None,
    run: Optional[int] = None,
    plate: Optional[str] = None,
    reagent: Optional[str] = None,
    sample: Optional[str] = None,
    creator: Optional[str] = None,
    archived: Optional[bool] = None,
    # Paging parameters
    page: Optional[int] = None,
    per_page: Optional[int] = None,
):
    """List runs matching the search parameters, restricted to rows the
    current user may GET (per casbin), paginated via `paginatify`.

    Only the JSONB columns actually requested in the GraphQL query are
    selected; fixed columns (id, run_id, audit fields) are always selected.
    """
    # Calculate which top level fields to remove.
    top_level_ignore = {'id', 'run_id', 'created_by', 'created_on', 'updated_by', 'updated_on', 'protocol'}

    # Flatten `info` parameter into jsonb_query_path statements.
    select_args = []
    top_level = set()
    for result in graphql_ast_flatten_field(info.field_asts[0], info.fragments, info.return_type, info.schema):
        # result_parts[3] is the first field below the paginated wrapper
        # (e.g. runs[*].page.<field>.<...>) — TODO confirm against schema.
        result_parts = result.split('.')
        if len(result_parts) > 3 and result_parts[3] not in top_level_ignore:
            top_level.add(result_parts[3])
    # NOTE(review): `jsonb_fields` is populated but never read afterwards —
    # looks like leftover scaffolding.
    jsonb_fields = [
        'id',
        'run_id',
        'created_by',
        'created_on',
        'updated_by',
        'updated_on',
    ]
    select_args = [
        Run.id.label('id'),
        Run.id.label('run_id'),
        Run.created_by.label('created_by'),
        Run.created_on.label('created_on'),
        RunVersion.updated_by.label('updated_by'),
        RunVersion.updated_on.label('updated_on'),
    ]
    for field in top_level:
        # func.jsonb_path_query(RunVersion.data, f"$.{field}").label(field)
        jsonb_fields.append(field)
        select_args.append(RunVersion.data[field].label(field))
    db = get_session(info)

    # Join with additional tables as necessary for search params.
    from_tables = OrderedDict()
    filters = []
    if protocol:
        from_tables[ProtocolVersion] = ProtocolVersion.id == Run.protocol_version_id
        filters.append(ProtocolVersion.protocol_id == protocol)
    if run:
        filters.append(Run.id == run)
    if plate:
        filters.append(filter_by_plate_label_filter(plate))
    if reagent:
        filters.append(filter_by_reagent_label_filter(reagent))
    if sample:
        filters.append(filter_by_sample_label_filter(sample))
    if creator:
        filters.append(Run.created_by == creator)
    # Default (archived unset or False) hides soft-deleted rows.
    if archived is None or archived == False:
        filters.append(Run.is_deleted == False)
    query = db.query(*select_args)\
        .select_from(Run)\
        .join(RunVersion, RunVersion.id == Run.version_id)
    for join_cls, join_filter in from_tables.items():
        query = query.join(join_cls, join_filter)

    # Apply search filters.
    for search_filter in filters:
        query = query.filter(search_filter)

    # Get results
    query = query.distinct().order_by(Run.created_on.desc())
    # Authorization is applied per-row after the query runs.
    rows = [
        run
        for run
        in query
        if check_access(enforcer, user=current_user.username, path=f"/run/{str(run.id)}", method="GET")
    ]
    return paginatify(
        items_label='runs',
        items=rows,
        item_to_dict=lambda run: RunModel.parse_obj(run._asdict()),
        page=page,
        per_page=per_page,
    )


def graphql_crud_get_protocols(
    enforcer: casbin.Enforcer,
    current_user: Auth0CurrentUserPatched,
    info: ResolveInfo,
    # Search parameters
    protocol: Optional[int] = None,
    run: Optional[int] = None,
    plate: Optional[str] = None,
    reagent: Optional[str] = None,
    sample: Optional[str] = None,
    creator: Optional[str] = None,
    archived: Optional[bool] = None,
    # Paging parameters
    page: Optional[int] = None,
    per_page: Optional[int] = None,
):
    """List protocols matching the search parameters, restricted to rows
    the current user may GET (per casbin), paginated via `paginatify`.

    Mirrors `graphql_crud_get_runs`, selecting only the JSONB fields the
    GraphQL query requested.
    """
    # Calculate which top level fields to remove.
    top_level_ignore = {'id', 'protocol_id', 'created_by', 'created_on', 'updated_by', 'updated_on', 'protocol'}

    # Flatten `info` parameter into jsonb_query_path statements.
    select_args = []
    top_level = set()
    for result in graphql_ast_flatten_field(info.field_asts[0], info.fragments, info.return_type, info.schema):
        result_parts = result.split('.')
        if len(result_parts) > 3 and result_parts[3] not in top_level_ignore:
            top_level.add(result_parts[3])
    # NOTE(review): `jsonb_fields` is populated but never read afterwards.
    jsonb_fields = [
        'id',
        'protocol_id',
        'created_by',
        'created_on',
        'updated_by',
        'updated_on',
    ]
    select_args = [
        Protocol.id.label('id'),
        Protocol.id.label('protocol_id'),
        Protocol.created_by.label('created_by'),
        Protocol.created_on.label('created_on'),
        ProtocolVersion.updated_by.label('updated_by'),
        ProtocolVersion.updated_on.label('updated_on'),
    ]
    for field in top_level:
        jsonb_fields.append(field)
        select_args.append(ProtocolVersion.data[field].label(field))
    db = get_session(info)

    # Join with additional tables as necessary for search params.
    # `from_tables` is an OrderedDict keyed by table class, so repeated
    # assignments below dedupe joins when several params need Run/RunVersion.
    from_tables = OrderedDict()
    filters = []
    if protocol:
        filters.append(Protocol.id == protocol)
    if run:
        from_tables[Run] = Run.protocol_version_id == ProtocolVersion.id
        filters.append(Run.id == run)
    if plate:
        from_tables[Run] = Run.protocol_version_id == ProtocolVersion.id
        from_tables[RunVersion] = RunVersion.id == Run.version_id
        filters.append(filter_by_plate_label_filter(plate))
    if reagent:
        from_tables[Run] = Run.protocol_version_id == ProtocolVersion.id
        from_tables[RunVersion] = RunVersion.id == Run.version_id
        filters.append(filter_by_reagent_label_filter(reagent))
    if sample:
        from_tables[Run] = Run.protocol_version_id == ProtocolVersion.id
        from_tables[RunVersion] = RunVersion.id == Run.version_id
        filters.append(filter_by_sample_label_filter(sample))
    if creator:
        filters.append(Protocol.created_by == creator)
    # Default (archived unset or False) hides soft-deleted rows.
    if archived is None or archived == False:
        filters.append(Protocol.is_deleted == False)
    query = db.query(*select_args)\
        .select_from(Protocol)\
        .join(ProtocolVersion, ProtocolVersion.id == Protocol.version_id)
    for join_cls, join_filter in from_tables.items():
        query = query.join(join_cls, join_filter)

    # Apply search filters.
    for search_filter in filters:
        query = query.filter(search_filter)

    # Get results
    query = query.distinct().order_by(Protocol.created_on.desc())
    rows = [
        protocol
        for protocol
        in query
        if check_access(enforcer, user=current_user.username, path=f"/protocol/{str(protocol.id)}", method="GET")
    ]
    return paginatify(
        items_label='protocols',
        items=rows,
        item_to_dict=lambda protocol: ProtocolModel.parse_obj(protocol._asdict()),
        page=page,
        per_page=per_page,
    )


def graphql_crud_get_samples(
    enforcer: casbin.Enforcer,
    current_user: Auth0CurrentUserPatched,
    info: ResolveInfo,
    # Search parameters
    protocol: Optional[int] = None,
    run: Optional[int] = None,
    plate: Optional[str] = None,
    reagent: Optional[str] = None,
    sample: Optional[str] = None,
    creator: Optional[str] = None,
    archived: Optional[bool] = None,
    # Paging parameters
    page: Optional[int] = None,
    per_page: Optional[int] = None,
):
    """List samples matching the search parameters, restricted to samples
    whose parent run the current user may GET, paginated via `paginatify`.

    Mirrors the runs/protocols variants; several columns are selected
    twice under snake_case and camelCase labels (sample_id/sampleID, …)
    so both GraphQL naming styles resolve.
    """
    # Calculate which top level fields to remove.
    top_level_ignore = {'sample_id', 'plate_id', 'run_version_id', 'protocol_version_id', 'created_by', 'created_on', 'updated_by', 'updated_on', 'run_id', 'protocol_id'}

    # Flatten `info` parameter into jsonb_query_path statements.
    select_args = []
    top_level = set()
    for result in graphql_ast_flatten_field(info.field_asts[0], info.fragments, info.return_type, info.schema):
        result_parts = result.split('.')
        if len(result_parts) > 3 and result_parts[3] not in top_level_ignore:
            top_level.add(result_parts[3])
    # NOTE(review): `jsonb_fields` is populated but never read afterwards.
    jsonb_fields = [
        'sample_id',
        'sampleID',
        'plate_id',
        'plateID',
        'run_version_id',
        'protocol_version_id',
        'created_by',
        'created_on',
        'updated_by',
        'updated_on',
        'run_id',
        'runID',
        'protocol_id',
        'protocolID',
    ]
    select_args = [
        Sample.sample_id.label('sample_id'),
        Sample.sample_id.label('sampleID'),
        Sample.plate_id.label('plate_id'),
        Sample.plate_id.label('plateID'),
        Sample.run_version_id.label('run_version_id'),
        Sample.protocol_version_id.label('protocol_version_id'),
        Sample.created_by.label('created_by'),
        Sample.created_on.label('created_on'),
        SampleVersion.updated_by.label('updated_by'),
        SampleVersion.updated_on.label('updated_on'),
        RunVersion.run_id.label('run_id'),
        RunVersion.run_id.label('runID'),
        ProtocolVersion.protocol_id.label('protocol_id'),
        ProtocolVersion.protocol_id.label('protocolID'),
    ]
    for field in top_level:
        jsonb_fields.append(field)
        select_args.append(SampleVersion.data[field].label(field))
    db = get_session(info)

    # Join with additional tables as necessary for search params.
    # NOTE(review): `from_tables` stays empty here — the run/protocol
    # version tables are always joined below.
    from_tables = OrderedDict()
    filters = []
    if protocol:
        filters.append(ProtocolVersion.protocol_id == protocol)
    if run:
        filters.append(RunVersion.run_id == run)
    if plate:
        filters.append(Sample.plate_id.like(f"%{plate}%"))
    if reagent:
        filters.append(filter_by_reagent_label_filter(reagent))
    if sample:
        filters.append(Sample.sample_id.like(f"%{sample}%"))
    if creator:
        filters.append(Sample.created_by == creator)
    # Default (archived unset or False) hides soft-deleted rows.
    if archived is None or archived == False:
        filters.append(Sample.is_deleted == False)
    query = db.query(*select_args)\
        .select_from(Sample)\
        .join(SampleVersion, SampleVersion.id == Sample.version_id)\
        .join(RunVersion, RunVersion.id == Sample.run_version_id)\
        .join(ProtocolVersion, ProtocolVersion.id == Sample.protocol_version_id)
    for join_cls, join_filter in from_tables.items():
        query = query.join(join_cls, join_filter)

    # Apply search filters.
    for search_filter in filters:
        query = query.filter(search_filter)

    # Get results
    query = query.distinct().order_by(Sample.created_on.desc())
    # Access is checked against the sample's parent run, not the sample.
    rows = [
        sample
        for sample
        in query
        if check_access(enforcer, user=current_user.username, path=f"/run/{str(sample.run_id)}", method="GET")
    ]
    return paginatify(
        items_label='samples',
        items=rows,
        item_to_dict=lambda sample: SampleResult.parse_obj(add_sample_id(sample._asdict())),
        page=page,
        per_page=per_page,
    )
0.74382
0.14262
from abc import abstractmethod
from sqlalchemy import Column, event, ForeignKey, Integer, String, VARBINARY
from sqlalchemy import Boolean
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.orm import relationship

import binascii
import six

from kmip.core import enums
from kmip.pie import sqltypes as sql


class ManagedObject(sql.Base):
    """
    The abstract base class of the simplified KMIP object hierarchy.

    A ManagedObject is a core KMIP object that is the subject of key
    management operations. It contains various attributes that are common to
    all types of ManagedObjects, including keys, certificates, and various
    types of secret or sensitive data.

    For more information, see Section 2.2 of the KMIP 1.1 specification.

    Attributes:
        value: The value of the ManagedObject. Type varies, usually bytes.
        unique_identifier: The string ID of the ManagedObject.
        names: A list of names associated with the ManagedObject.
        object_type: An enumeration associated with the type of ManagedObject.
    """

    __tablename__ = 'managed_objects'
    unique_identifier = Column('uid', Integer, primary_key=True)
    _object_type = Column('object_type', sql.EnumType(enums.ObjectType))
    # Discriminator column for SQLAlchemy single-hierarchy polymorphism.
    _class_type = Column('class_type', String(50))
    value = Column('value', VARBINARY(1024))
    name_index = Column(Integer, default=0)
    _names = relationship('ManagedObjectName', back_populates='mo',
                          cascade='all, delete-orphan')
    # `names` proxies straight through to the related ManagedObjectName rows.
    names = association_proxy('_names', 'name')
    operation_policy_name = Column(
        'operation_policy_name',
        String(50),
        default='default'
    )
    initial_date = Column(Integer, default=0)
    _owner = Column('owner', String(50), default=None)

    __mapper_args__ = {
        'polymorphic_identity': 'ManagedObject',
        'polymorphic_on': _class_type
    }
    __table_args__ = {
        'sqlite_autoincrement': True
    }

    @abstractmethod
    def __init__(self):
        """
        Create a ManagedObject.
        """
        self.value = None
        self.unique_identifier = None
        self.name_index = 0
        self.names = list()
        self.operation_policy_name = None
        self.initial_date = 0

        self._object_type = None
        self._owner = None

        # All remaining attributes are not considered part of the public API
        # and are subject to change.
        self._application_specific_informations = list()
        self._contact_information = None
        self._object_groups = list()

        # The following attributes are placeholders for attributes that are
        # unsupported by kmip.core
        self._archive_date = None
        self._last_change_date = None

    @property
    def object_type(self):
        """
        Accessor and property definition for the object type attribute.

        Returns:
            ObjectType: An ObjectType enumeration that corresponds to the
                class of the object.
        """
        return self._object_type

    @object_type.setter
    def object_type(self, value):
        """
        Set blocker for the object type attribute.

        The object type is fixed by each concrete subclass's constructor
        and must never be changed afterwards.

        Raises:
            AttributeError: Always raised to block setting of attribute.
        """
        raise AttributeError("object type cannot be set")

    @abstractmethod
    def validate(self):
        """
        Verify that the contents of the ManagedObject are valid.
        """
        pass

    @abstractmethod
    def __repr__(self):
        pass

    @abstractmethod
    def __str__(self):
        pass

    @abstractmethod
    def __eq__(self, other):
        pass

    @abstractmethod
    def __ne__(self, other):
        pass


class CryptographicObject(ManagedObject):
    """
    The abstract base class of all ManagedObjects related to cryptography.

    A CryptographicObject is a core KMIP object that is the subject of key
    management operations. It contains various attributes that are common to
    all types of CryptographicObjects, including keys and certificates.

    For more information, see Section 2.2 of the KMIP 1.1 specification.

    Attributes:
        cryptographic_usage_masks: A list of usage mask enumerations
            describing how the CryptographicObject will be used.
    """

    __tablename__ = 'crypto_objects'
    unique_identifier = Column('uid', Integer,
                               ForeignKey('managed_objects.uid'),
                               primary_key=True)
    cryptographic_usage_masks = Column('cryptographic_usage_mask',
                                       sql.UsageMaskType)
    state = Column('state', sql.EnumType(enums.State))

    __mapper_args__ = {
        'polymorphic_identity': 'CryptographicObject'
    }
    __table_args__ = {
        'sqlite_autoincrement': True
    }

    @abstractmethod
    def __init__(self):
        """
        Create a CryptographicObject.
        """
        super(CryptographicObject, self).__init__()
        self.cryptographic_usage_masks = list()
        # New objects start in the KMIP PRE_ACTIVE lifecycle state.
        self.state = enums.State.PRE_ACTIVE

        # All remaining attributes are not considered part of the public API
        # and are subject to change.
        self._digests = list()

        # The following attributes are placeholders for attributes that are
        # unsupported by kmip.core
        self._activation_date = None
        self._compromise_date = None
        self._compromise_occurrence_date = None
        self._deactivation_date = None
        self._destroy_date = None
        self._fresh = None
        self._lease_time = None
        self._links = list()
        self._revocation_reason = None


class Key(CryptographicObject):
    """
    The abstract base class of all ManagedObjects that are cryptographic keys.

    A Key is a core KMIP object that is the subject of key management
    operations. It contains various attributes that are common to all types of
    Keys, including symmetric and asymmetric keys.

    For more information, see Section 2.2 of the KMIP 1.1 specification.

    Attributes:
        cryptographic_algorithm: A CryptographicAlgorithm enumeration defining
            the algorithm the key should be used with.
        cryptographic_length: An int defining the length of the key in bits.
        key_format_type: A KeyFormatType enumeration defining the format of
            the key value.
        key_wrapping_data: A dictionary containing key wrapping data
            settings, describing how the key value has been wrapped.
    """

    __tablename__ = 'keys'
    unique_identifier = Column('uid', Integer,
                               ForeignKey('crypto_objects.uid'),
                               primary_key=True)
    cryptographic_algorithm = Column(
        'cryptographic_algorithm', sql.EnumType(enums.CryptographicAlgorithm))
    cryptographic_length = Column('cryptographic_length', Integer)
    key_format_type = Column(
        'key_format_type', sql.EnumType(enums.KeyFormatType))

    # Key wrapping data fields
    # Naming scheme: _kdw_* = key wrapping data; eki = encryption key
    # information; mski = MAC/signature key information; cp = cryptographic
    # parameters. The `key_wrapping_data` property below maps these flat
    # columns to/from a nested dictionary.
    _kdw_wrapping_method = Column(
        '_kdw_wrapping_method',
        sql.EnumType(enums.WrappingMethod),
        default=None
    )
    _kdw_eki_unique_identifier = Column(
        '_kdw_eki_unique_identifier',
        String,
        default=None
    )
    _kdw_eki_cp_block_cipher_mode = Column(
        '_kdw_eki_cp_block_cipher_mode',
        sql.EnumType(enums.BlockCipherMode),
        default=None
    )
    _kdw_eki_cp_padding_method = Column(
        '_kdw_eki_cp_padding_method',
        sql.EnumType(enums.PaddingMethod),
        default=None
    )
    _kdw_eki_cp_hashing_algorithm = Column(
        '_kdw_eki_cp_hashing_algorithm',
        sql.EnumType(enums.HashingAlgorithm),
        default=None
    )
    _kdw_eki_cp_key_role_type = Column(
        '_kdw_eki_cp_key_role_type',
        sql.EnumType(enums.KeyRoleType),
        default=None
    )
    _kdw_eki_cp_digital_signature_algorithm = Column(
        '_kdw_eki_cp_digital_signature_algorithm',
        sql.EnumType(enums.DigitalSignatureAlgorithm),
        default=None
    )
    _kdw_eki_cp_cryptographic_algorithm = Column(
        '_kdw_eki_cp_cryptographic_algorithm',
        sql.EnumType(enums.CryptographicAlgorithm),
        default=None
    )
    _kdw_eki_cp_random_iv = Column(
        '_kdw_eki_cp_random_iv',
        Boolean,
        default=None
    )
    _kdw_eki_cp_iv_length = Column(
        '_kdw_eki_cp_iv_length',
        Integer,
        default=None
    )
    _kdw_eki_cp_tag_length = Column(
        '_kdw_eki_cp_tag_length',
        Integer,
        default=None
    )
    _kdw_eki_cp_fixed_field_length = Column(
        '_kdw_eki_cp_fixed_field_length',
        Integer,
        default=None
    )
    # NOTE(review): unlike its siblings this column has no explicit
    # default=None — likely an oversight; confirm before relying on it.
    _kdw_eki_cp_invocation_field_length = Column(
        '_kdw_eki_cp_invocation_field_length',
        Integer
    )
    _kdw_eki_cp_counter_length = Column(
        '_kdw_eki_cp_counter_length',
        Integer,
        default=None
    )
    _kdw_eki_cp_initial_counter_value = Column(
        '_kdw_eki_cp_initial_counter_value',
        Integer,
        default=None
    )
    _kdw_mski_unique_identifier = Column(
        '_kdw_mski_unique_identifier',
        String,
        default=None
    )
    _kdw_mski_cp_block_cipher_mode = Column(
        '_kdw_mski_cp_block_cipher_mode',
        sql.EnumType(enums.BlockCipherMode),
        default=None
    )
    _kdw_mski_cp_padding_method = Column(
        '_kdw_mski_cp_padding_method',
        sql.EnumType(enums.PaddingMethod),
        default=None
    )
    _kdw_mski_cp_hashing_algorithm = Column(
        '_kdw_mski_cp_hashing_algorithm',
        sql.EnumType(enums.HashingAlgorithm),
        default=None
    )
    _kdw_mski_cp_key_role_type = Column(
        '_kdw_mski_cp_key_role_type',
        sql.EnumType(enums.KeyRoleType),
        default=None
    )
    _kdw_mski_cp_digital_signature_algorithm = Column(
        '_kdw_mski_cp_digital_signature_algorithm',
        sql.EnumType(enums.DigitalSignatureAlgorithm),
        default=None
    )
    _kdw_mski_cp_cryptographic_algorithm = Column(
        '_kdw_mski_cp_cryptographic_algorithm',
        sql.EnumType(enums.CryptographicAlgorithm),
        default=None
    )
    _kdw_mski_cp_random_iv = Column(
        '_kdw_mski_cp_random_iv',
        Boolean,
        default=None
    )
    _kdw_mski_cp_iv_length = Column(
        '_kdw_mski_cp_iv_length',
        Integer,
        default=None
    )
    _kdw_mski_cp_tag_length = Column(
        '_kdw_mski_cp_tag_length',
        Integer,
        default=None
    )
    _kdw_mski_cp_fixed_field_length = Column(
        '_kdw_mski_cp_fixed_field_length',
        Integer,
        default=None
    )
    _kdw_mski_cp_invocation_field_length = Column(
        '_kdw_mski_cp_invocation_field_length',
        Integer,
        default=None
    )
    _kdw_mski_cp_counter_length = Column(
        '_kdw_mski_cp_counter_length',
        Integer,
        default=None
    )
    _kdw_mski_cp_initial_counter_value = Column(
        '_kdw_mski_cp_initial_counter_value',
        Integer,
        default=None
    )
    _kdw_mac_signature = Column(
        '_kdw_mac_signature',
        VARBINARY(1024),
        default=None
    )
    _kdw_iv_counter_nonce = Column(
        '_kdw_iv_counter_nonce',
        VARBINARY(1024),
        default=None
    )
    _kdw_encoding_option = Column(
        '_kdw_encoding_option',
        sql.EnumType(enums.EncodingOption),
        default=None
    )

    __mapper_args__ = {
        'polymorphic_identity': 'Key'
    }
    __table_args__ = {
        'sqlite_autoincrement': True
    }

    @abstractmethod
    def __init__(self, key_wrapping_data=None):
        """
        Create a Key object.

        Args:
            key_wrapping_data(dict): A dictionary containing key wrapping data
                settings, describing how the key value has been wrapped.
                Optional, defaults to None.
        """
        super(Key, self).__init__()

        self.cryptographic_algorithm = None
        self.cryptographic_length = None
        self.key_format_type = None
        # Goes through the property setter below, which unpacks the dict
        # into the flat _kdw_* columns.
        self.key_wrapping_data = key_wrapping_data

        # All remaining attributes are not considered part of the public API
        # and are subject to change.
        self._cryptographic_parameters = list()

        # The following attributes are placeholders for attributes that are
        # unsupported by kmip.core
        self._usage_limits = None

    @property
    def key_wrapping_data(self):
        """
        Retrieve all of the relevant key wrapping data fields and return them
        as a dictionary.

        Sub-dictionaries whose values are all falsy collapse to {}; if every
        top-level value is falsy the whole result collapses to {}.
        """
        key_wrapping_data = {}
        encryption_key_info = {
            'unique_identifier': self._kdw_eki_unique_identifier,
            'cryptographic_parameters': {
                'block_cipher_mode': self._kdw_eki_cp_block_cipher_mode,
                'padding_method': self._kdw_eki_cp_padding_method,
                'hashing_algorithm': self._kdw_eki_cp_hashing_algorithm,
                'key_role_type': self._kdw_eki_cp_key_role_type,
                'digital_signature_algorithm':
                    self._kdw_eki_cp_digital_signature_algorithm,
                'cryptographic_algorithm':
                    self._kdw_eki_cp_cryptographic_algorithm,
                'random_iv': self._kdw_eki_cp_random_iv,
                'iv_length': self._kdw_eki_cp_iv_length,
                'tag_length': self._kdw_eki_cp_tag_length,
                'fixed_field_length': self._kdw_eki_cp_fixed_field_length,
                'invocation_field_length':
                    self._kdw_eki_cp_invocation_field_length,
                'counter_length': self._kdw_eki_cp_counter_length,
                'initial_counter_value':
                    self._kdw_eki_cp_initial_counter_value
            }
        }
        if not any(encryption_key_info['cryptographic_parameters'].values()):
            encryption_key_info['cryptographic_parameters'] = {}
        if not any(encryption_key_info.values()):
            encryption_key_info = {}
        mac_sign_key_info = {
            'unique_identifier': self._kdw_mski_unique_identifier,
            'cryptographic_parameters': {
                'block_cipher_mode': self._kdw_mski_cp_block_cipher_mode,
                'padding_method': self._kdw_mski_cp_padding_method,
                'hashing_algorithm': self._kdw_mski_cp_hashing_algorithm,
                'key_role_type': self._kdw_mski_cp_key_role_type,
                'digital_signature_algorithm':
                    self._kdw_mski_cp_digital_signature_algorithm,
                'cryptographic_algorithm':
                    self._kdw_mski_cp_cryptographic_algorithm,
                'random_iv': self._kdw_mski_cp_random_iv,
                'iv_length': self._kdw_mski_cp_iv_length,
                'tag_length': self._kdw_mski_cp_tag_length,
                'fixed_field_length': self._kdw_mski_cp_fixed_field_length,
                'invocation_field_length':
                    self._kdw_mski_cp_invocation_field_length,
                'counter_length': self._kdw_mski_cp_counter_length,
                'initial_counter_value':
                    self._kdw_mski_cp_initial_counter_value
            }
        }
        if not any(mac_sign_key_info['cryptographic_parameters'].values()):
            mac_sign_key_info['cryptographic_parameters'] = {}
        if not any(mac_sign_key_info.values()):
            mac_sign_key_info = {}
        key_wrapping_data['wrapping_method'] = self._kdw_wrapping_method
        key_wrapping_data['encryption_key_information'] = encryption_key_info
        key_wrapping_data['mac_signature_key_information'] = mac_sign_key_info
        key_wrapping_data['mac_signature'] = self._kdw_mac_signature
        key_wrapping_data['iv_counter_nonce'] = self._kdw_iv_counter_nonce
        key_wrapping_data['encoding_option'] = self._kdw_encoding_option
        if not any(key_wrapping_data.values()):
            key_wrapping_data = {}

        return key_wrapping_data

    @key_wrapping_data.setter
    def key_wrapping_data(self, value):
        """
        Set the key wrapping data attributes using a dictionary.

        Missing keys store None in the corresponding column; a None value
        is treated as an empty dictionary.

        Raises:
            TypeError: if value is neither None nor a dict.
        """
        if value is None:
            value = {}
        elif not isinstance(value, dict):
            raise TypeError("Key wrapping data must be a dictionary.")
        self._kdw_wrapping_method = value.get('wrapping_method')
        eki = value.get('encryption_key_information')
        if eki is None:
            eki = {}
        self._kdw_eki_unique_identifier = eki.get('unique_identifier')
        eki_cp = eki.get('cryptographic_parameters')
        if eki_cp is None:
            eki_cp = {}
        self._kdw_eki_cp_block_cipher_mode = eki_cp.get('block_cipher_mode')
        self._kdw_eki_cp_padding_method = eki_cp.get('padding_method')
        self._kdw_eki_cp_hashing_algorithm = eki_cp.get('hashing_algorithm')
        self._kdw_eki_cp_key_role_type = eki_cp.get('key_role_type')
        self._kdw_eki_cp_digital_signature_algorithm = \
            eki_cp.get('digital_signature_algorithm')
        self._kdw_eki_cp_cryptographic_algorithm = \
            eki_cp.get('cryptographic_algorithm')
        self._kdw_eki_cp_random_iv = eki_cp.get('random_iv')
        self._kdw_eki_cp_iv_length = eki_cp.get('iv_length')
        self._kdw_eki_cp_tag_length = eki_cp.get('tag_length')
        self._kdw_eki_cp_fixed_field_length = eki_cp.get('fixed_field_length')
        self._kdw_eki_cp_invocation_field_length = \
            eki_cp.get('invocation_field_length')
        self._kdw_eki_cp_counter_length = eki_cp.get('counter_length')
        self._kdw_eki_cp_initial_counter_value = \
            eki_cp.get('initial_counter_value')
        mski = value.get('mac_signature_key_information')
        if mski is None:
            mski = {}
        self._kdw_mski_unique_identifier = mski.get('unique_identifier')
        mski_cp = mski.get('cryptographic_parameters')
        if mski_cp is None:
            mski_cp = {}
        self._kdw_mski_cp_block_cipher_mode = mski_cp.get('block_cipher_mode')
        self._kdw_mski_cp_padding_method = mski_cp.get('padding_method')
        self._kdw_mski_cp_hashing_algorithm = mski_cp.get('hashing_algorithm')
        self._kdw_mski_cp_key_role_type = mski_cp.get('key_role_type')
        self._kdw_mski_cp_digital_signature_algorithm = \
            mski_cp.get('digital_signature_algorithm')
        self._kdw_mski_cp_cryptographic_algorithm = \
            mski_cp.get('cryptographic_algorithm')
        self._kdw_mski_cp_random_iv = mski_cp.get('random_iv')
        self._kdw_mski_cp_iv_length = mski_cp.get('iv_length')
        self._kdw_mski_cp_tag_length = mski_cp.get('tag_length')
        self._kdw_mski_cp_fixed_field_length = \
            mski_cp.get('fixed_field_length')
        self._kdw_mski_cp_invocation_field_length = \
            mski_cp.get('invocation_field_length')
        self._kdw_mski_cp_counter_length = mski_cp.get('counter_length')
        self._kdw_mski_cp_initial_counter_value = \
            mski_cp.get('initial_counter_value')
        self._kdw_mac_signature = value.get('mac_signature')
        self._kdw_iv_counter_nonce = value.get('iv_counter_nonce')
        self._kdw_encoding_option = value.get('encoding_option')


class SymmetricKey(Key):
    """
    The SymmetricKey class of the simplified KMIP object hierarchy.

    A SymmetricKey is a core KMIP object that is the subject of key
    management operations. For more information, see Section 2.2 of the KMIP
    1.1 specification.

    Attributes:
        cryptographic_algorithm: The type of algorithm for the SymmetricKey.
        cryptographic_length: The length in bits of the SymmetricKey value.
        value: The bytes of the SymmetricKey.
        key_format_type: The format of the key value.
        cryptographic_usage_masks: The list of usage mask flags for
            SymmetricKey application.
        names: The string names of the SymmetricKey.
        key_wrapping_data: A dictionary containing key wrapping data
            settings, describing how the key value has been wrapped.
    """

    __tablename__ = 'symmetric_keys'
    unique_identifier = Column('uid', Integer,
                               ForeignKey('keys.uid'),
                               primary_key=True)

    __mapper_args__ = {
        'polymorphic_identity': 'SymmetricKey'
    }
    __table_args__ = {
        'sqlite_autoincrement': True
    }

    def __init__(self, algorithm, length, value, masks=None,
                 name='Symmetric Key', key_wrapping_data=None):
        """
        Create a SymmetricKey.

        Args:
            algorithm(CryptographicAlgorithm): An enumeration identifying the
                type of algorithm for the key.
            length(int): The length in bits of the key.
            value(bytes): The bytes representing the key.
            masks(list): A list of CryptographicUsageMask enumerations
                defining how the key will be used. Optional, defaults to None.
            name(string): The string name of the key. Optional, defaults to
                'Symmetric Key'.
            key_wrapping_data(dict): A dictionary containing key wrapping data
                settings, describing how the key value has been wrapped.
                Optional, defaults to None.

        Raises:
            TypeError/ValueError: via validate() if the arguments are
                inconsistent or of the wrong type.
        """
        super(SymmetricKey, self).__init__(
            key_wrapping_data=key_wrapping_data
        )

        self._object_type = enums.ObjectType.SYMMETRIC_KEY
        self.key_format_type = enums.KeyFormatType.RAW

        self.value = value
        self.cryptographic_algorithm = algorithm
        self.cryptographic_length = length
        self.names = [name]

        if masks:
            self.cryptographic_usage_masks.extend(masks)

        # All remaining attributes are not considered part of the public API
        # and are subject to change.

        # The following attributes are placeholders for attributes that are
        # unsupported by kmip.core
        self._process_start_date = None
        self._protect_stop_date = None

        self.validate()

    def validate(self):
        """
        Verify that the contents of the SymmetricKey object are valid.

        Raises:
            TypeError: if the types of any SymmetricKey attributes are invalid
            ValueError: if the key length and key value length do not match
        """
        if not isinstance(self.value, bytes):
            raise TypeError("key value must be bytes")
        elif not isinstance(self.cryptographic_algorithm,
                            enums.CryptographicAlgorithm):
            raise TypeError("key algorithm must be a CryptographicAlgorithm "
                            "enumeration")
        elif not isinstance(self.cryptographic_length, six.integer_types):
            raise TypeError("key length must be an integer")

        mask_count = len(self.cryptographic_usage_masks)
        for i in range(mask_count):
            mask = self.cryptographic_usage_masks[i]
            if not isinstance(mask, enums.CryptographicUsageMask):
                position = "({0} in list)".format(i)
                raise TypeError(
                    "key mask {0} must be a CryptographicUsageMask "
                    "enumeration".format(position))

        name_count = len(self.names)
        for i in range(name_count):
            name = self.names[i]
            if not isinstance(name, six.string_types):
                position = "({0} in list)".format(i)
                raise TypeError("key name {0} must be a string".format(
                    position))

        # Length is only checked against the raw value when the key is not
        # wrapped (a wrapped value's byte length may legitimately differ).
        if not self.key_wrapping_data:
            if (len(self.value) * 8) != self.cryptographic_length:
                msg = "key length ({0}) not equal to key value length ({1})"
                msg = msg.format(
                    self.cryptographic_length,
                    len(self.value) * 8
                )
                raise ValueError(msg)

    def __repr__(self):
        algorithm = "algorithm={0}".format(self.cryptographic_algorithm)
        length = "length={0}".format(self.cryptographic_length)
        value = "value={0}".format(binascii.hexlify(self.value))
        key_wrapping_data = "key_wrapping_data={0}".format(
            self.key_wrapping_data
        )

        return "SymmetricKey({0}, {1}, {2}, {3})".format(
            algorithm,
            length,
            value,
            key_wrapping_data
        )

    def __str__(self):
        return str(binascii.hexlify(self.value))

    def __eq__(self, other):
        if isinstance(other, SymmetricKey):
            if self.value != other.value:
                return False
            elif self.cryptographic_algorithm != other.cryptographic_algorithm:
                return False
            elif self.cryptographic_length != other.cryptographic_length:
                return False
            elif self.key_wrapping_data != other.key_wrapping_data:
                return False
            else:
                return True
        else:
            return NotImplemented

    def __ne__(self, other):
        if isinstance(other, SymmetricKey):
            return not (self == other)
        else:
            return NotImplemented


# Keep name_index in sync as names are appended via the association proxy.
event.listen(SymmetricKey._names, 'append',
             sql.attribute_append_factory("name_index"), retval=False)


class PublicKey(Key):
    """
    The PublicKey class of the simplified KMIP object hierarchy.

    A PublicKey is a core KMIP object that is the subject of key
    management operations. For more information, see Section 2.2 of the KMIP
    1.1 specification.

    Attributes:
        cryptographic_algorithm: The type of algorithm for the PublicKey.
        cryptographic_length: The length in bits of the PublicKey.
        value: The bytes of the PublicKey.
        key_format_type: The format of the key value.
        cryptographic_usage_masks: The list of usage mask flags for PublicKey
            application.
        names: The list of string names of the PublicKey.
        key_wrapping_data(dict): A dictionary containing key wrapping data
            settings, describing how the key value has been wrapped.
    """

    __tablename__ = 'public_keys'
    unique_identifier = Column('uid', Integer,
                               ForeignKey('keys.uid'),
                               primary_key=True)

    __mapper_args__ = {
        'polymorphic_identity': 'PublicKey'
    }
    __table_args__ = {
        'sqlite_autoincrement': True
    }

    def __init__(self, algorithm, length, value,
                 format_type=enums.KeyFormatType.X_509, masks=None,
                 name='Public Key', key_wrapping_data=None):
        """
        Create a PublicKey.

        Args:
            algorithm(CryptographicAlgorithm): An enumeration identifying the
                type of algorithm for the key.
            length(int): The length in bits of the key.
            value(bytes): The bytes representing the key.
            format_type(KeyFormatType): An enumeration defining the format of
                the key value. Optional, defaults to enums.KeyFormatType.X_509.
            masks(list): A list of CryptographicUsageMask enumerations
                defining how the key will be used. Optional, defaults to None.
            name(string): The string name of the key. Optional, defaults to
                'Public Key'.
            key_wrapping_data(dict): A dictionary containing key wrapping data
                settings, describing how the key value has been wrapped.
                Optional, defaults to None.
        """
        super(PublicKey, self).__init__(
            key_wrapping_data=key_wrapping_data
        )

        self._object_type = enums.ObjectType.PUBLIC_KEY
        self._valid_formats = [
            enums.KeyFormatType.RAW,
            enums.KeyFormatType.X_509,
            enums.KeyFormatType.PKCS_1]

        self.value = value
        self.cryptographic_algorithm = algorithm
        self.cryptographic_length = length
        self.key_format_type = format_type
        self.names = [name]

        # NOTE(review): replaces the masks list outright, whereas
        # SymmetricKey.__init__ uses .extend() — confirm this asymmetry
        # is intentional.
        if masks:
            self.cryptographic_usage_masks = masks

        # All remaining attributes are not considered part of the public API
        # and are subject to change.

        # The following attributes are placeholders for attributes that are
        # unsupported by kmip.core
        self._cryptographic_domain_parameters = list()

        self.validate()

    def validate(self):
        """
        Verify that the contents of the PublicKey object are valid.

        Raises:
            TypeError: if the types of any PublicKey attributes are invalid.
        """
        if not isinstance(self.value, bytes):
            raise TypeError("key value must be bytes")
        elif not isinstance(self.cryptographic_algorithm,
                            enums.CryptographicAlgorithm):
            raise TypeError("key algorithm must be a CryptographicAlgorithm "
                            "enumeration")
        elif not isinstance(self.cryptographic_length, six.integer_types):
            raise TypeError("key length must be an integer")
        elif not isinstance(self.key_format_type, enums.KeyFormatType):
            raise TypeError("key format type must be a KeyFormatType "
                            "enumeration")
        elif self.key_format_type not in self._valid_formats:
            raise ValueError("key format type must be one of {0}".format(
                self._valid_formats))

        # TODO (peter-hamilton) Verify that the key bytes match the key format

        mask_count = len(self.cryptographic_usage_masks)
        for i in range(mask_count):
            mask = self.cryptographic_usage_masks[i]
            if not isinstance(mask, enums.CryptographicUsageMask):
                position = "({0} in list)".format(i)
                raise TypeError(
                    "key mask {0} must be a CryptographicUsageMask "
                    "enumeration".format(position))

        name_count = len(self.names)
        for i in range(name_count):
            name = self.names[i]
            if not isinstance(name, six.string_types):
                position = "({0} in list)".format(i)
                raise TypeError("key name {0} must be a string".format(
                    position))

    def __repr__(self):
        algorithm = "algorithm={0}".format(self.cryptographic_algorithm)
        length = "length={0}".format(self.cryptographic_length)
        value = "value={0}".format(binascii.hexlify(self.value))
        format_type = "format_type={0}".format(self.key_format_type)
        key_wrapping_data = "key_wrapping_data={0}".format(
            self.key_wrapping_data
        )

        return "PublicKey({0}, {1}, {2}, {3}, {4})".format(
            algorithm, length, value, format_type, key_wrapping_data)

    def __str__(self):
        return str(binascii.hexlify(self.value))

    def __eq__(self, other):
        if isinstance(other, PublicKey):
            if self.value != other.value:
                return False
            elif self.key_format_type != other.key_format_type:
                return False
            elif self.cryptographic_algorithm != other.cryptographic_algorithm:
                return False
elif self.cryptographic_length != other.cryptographic_length: return False elif self.key_wrapping_data != other.key_wrapping_data: return False else: return True else: return NotImplemented def __ne__(self, other): if isinstance(other, PublicKey): return not (self == other) else: return NotImplemented event.listen(PublicKey._names, 'append', sql.attribute_append_factory("name_index"), retval=False) class PrivateKey(Key): """ The PrivateKey class of the simplified KMIP object hierarchy. A PrivateKey is a core KMIP object that is the subject of key management operations. For more information, see Section 2.2 of the KMIP 1.1 specification. Attributes: cryptographic_algorithm: The type of algorithm for the PrivateKey. cryptographic_length: The length in bits of the PrivateKey. value: The bytes of the PrivateKey. key_format_type: The format of the key value. cryptographic_usage_masks: The list of usage mask flags for PrivateKey application. Optional, defaults to None. names: The list of string names of the PrivateKey. Optional, defaults to 'Private Key'. key_wrapping_data(dict): A dictionary containing key wrapping data settings, describing how the key value has been wrapped. """ __tablename__ = 'private_keys' unique_identifier = Column('uid', Integer, ForeignKey('keys.uid'), primary_key=True) __mapper_args__ = { 'polymorphic_identity': 'PrivateKey' } __table_args__ = { 'sqlite_autoincrement': True } def __init__(self, algorithm, length, value, format_type, masks=None, name='Private Key', key_wrapping_data=None): """ Create a PrivateKey. Args: algorithm(CryptographicAlgorithm): An enumeration identifying the type of algorithm for the key. length(int): The length in bits of the key. value(bytes): The bytes representing the key. format_type(KeyFormatType): An enumeration defining the format of the key value. masks(list): A list of CryptographicUsageMask enumerations defining how the key will be used. name(string): The string name of the key. 
            key_wrapping_data(dict): A dictionary containing key wrapping
                data settings, describing how the key value has been wrapped.
                Optional, defaults to None.
        """
        super(PrivateKey, self).__init__(
            key_wrapping_data=key_wrapping_data
        )

        self._object_type = enums.ObjectType.PRIVATE_KEY
        # Only these formats are accepted by validate() below.
        self._valid_formats = [
            enums.KeyFormatType.RAW,
            enums.KeyFormatType.PKCS_1,
            enums.KeyFormatType.PKCS_8]

        self.value = value
        self.cryptographic_algorithm = algorithm
        self.cryptographic_length = length
        self.key_format_type = format_type
        self.names = [name]

        if masks:
            self.cryptographic_usage_masks = masks

        # All remaining attributes are not considered part of the public API
        # and are subject to change.

        # The following attributes are placeholders for attributes that are
        # unsupported by kmip.core
        self._cryptographic_domain_parameters = list()

        self.validate()

    def validate(self):
        """
        Verify that the contents of the PrivateKey object are valid.

        Raises:
            TypeError: if the types of any PrivateKey attributes are invalid.
        """
        if not isinstance(self.value, bytes):
            raise TypeError("key value must be bytes")
        elif not isinstance(self.cryptographic_algorithm,
                            enums.CryptographicAlgorithm):
            raise TypeError("key algorithm must be a CryptographicAlgorithm "
                            "enumeration")
        elif not isinstance(self.cryptographic_length, six.integer_types):
            raise TypeError("key length must be an integer")
        elif not isinstance(self.key_format_type, enums.KeyFormatType):
            raise TypeError("key format type must be a KeyFormatType "
                            "enumeration")
        elif self.key_format_type not in self._valid_formats:
            raise ValueError("key format type must be one of {0}".format(
                self._valid_formats))

        # TODO (peter-hamilton) Verify that the key bytes match the key format

        mask_count = len(self.cryptographic_usage_masks)
        for i in range(mask_count):
            mask = self.cryptographic_usage_masks[i]
            if not isinstance(mask, enums.CryptographicUsageMask):
                position = "({0} in list)".format(i)
                raise TypeError(
                    "key mask {0} must be a CryptographicUsageMask "
                    "enumeration".format(position))

        name_count = len(self.names)
        for i in range(name_count):
            name = self.names[i]
            if not isinstance(name, six.string_types):
                position = "({0} in list)".format(i)
                raise TypeError("key name {0} must be a string".format(
                    position))

    def __repr__(self):
        algorithm = "algorithm={0}".format(self.cryptographic_algorithm)
        length = "length={0}".format(self.cryptographic_length)
        value = "value={0}".format(binascii.hexlify(self.value))
        format_type = "format_type={0}".format(self.key_format_type)
        key_wrapping_data = "key_wrapping_data={0}".format(
            self.key_wrapping_data
        )
        return "PrivateKey({0}, {1}, {2}, {3}, {4})".format(
            algorithm, length, value, format_type, key_wrapping_data)

    def __str__(self):
        # Hex representation of the raw key bytes.
        return str(binascii.hexlify(self.value))

    def __eq__(self, other):
        if isinstance(other, PrivateKey):
            if self.value != other.value:
                return False
            elif self.key_format_type != other.key_format_type:
                return False
            elif self.cryptographic_algorithm != other.cryptographic_algorithm:
                return False
            elif self.cryptographic_length != other.cryptographic_length:
                return False
            elif self.key_wrapping_data != other.key_wrapping_data:
                return False
            else:
                return True
        else:
            return NotImplemented

    def __ne__(self, other):
        if isinstance(other, PrivateKey):
            return not (self == other)
        else:
            return NotImplemented


# Keep the name_index column in sync as names are appended (see
# sql.attribute_append_factory).
event.listen(PrivateKey._names, 'append',
             sql.attribute_append_factory("name_index"), retval=False)


class Certificate(CryptographicObject):
    """
    The Certificate class of the simplified KMIP object hierarchy.

    A Certificate is a core KMIP object that is the subject of key
    management operations. For more information, see Section 2.2 of the
    KMIP 1.1 specification.

    Attributes:
        certificate_type: The type of the Certificate.
        value: The bytes of the Certificate.
        cryptographic_usage_masks: The list of usage mask flags for
            Certificate application.
        names: The list of string names of the Certificate.
""" __tablename__ = 'certificates' unique_identifier = Column('uid', Integer, ForeignKey('crypto_objects.uid'), primary_key=True) certificate_type = Column( 'certificate_type', sql.EnumType(enums.CertificateType)) __mapper_args__ = { 'polymorphic_identity': 'Certificate' } __table_args__ = { 'sqlite_autoincrement': True } @abstractmethod def __init__(self, certificate_type, value, masks=None, name='Certificate'): """ Create a Certificate. Args: certificate_type(CertificateType): An enumeration defining the type of the certificate. value(bytes): The bytes representing the certificate. masks(list): A list of CryptographicUsageMask enumerations defining how the certificate will be used. name(string): The string name of the certificate. """ super(Certificate, self).__init__() self._object_type = enums.ObjectType.CERTIFICATE self.value = value self.certificate_type = certificate_type self.names = [name] if masks: self.cryptographic_usage_masks = masks # All remaining attributes are not considered part of the public API # and are subject to change. self._cryptographic_algorithm = None self._cryptographic_length = None self._certificate_length = None # The following attributes are placeholders for attributes that are # unsupported by kmip.core self._cryptographic_parameters = list() self._digital_signature_algorithm = list() self.validate() def validate(self): """ Verify that the contents of the Certificate object are valid. Raises: TypeError: if the types of any Certificate attributes are invalid. 
""" if not isinstance(self.value, bytes): raise TypeError("certificate value must be bytes") elif not isinstance(self.certificate_type, enums.CertificateType): raise TypeError("certificate type must be a CertificateType " "enumeration") mask_count = len(self.cryptographic_usage_masks) for i in range(mask_count): mask = self.cryptographic_usage_masks[i] if not isinstance(mask, enums.CryptographicUsageMask): position = "({0} in list)".format(i) raise TypeError( "certificate mask {0} must be a CryptographicUsageMask " "enumeration".format(position)) name_count = len(self.names) for i in range(name_count): name = self.names[i] if not isinstance(name, six.string_types): position = "({0} in list)".format(i) raise TypeError("certificate name {0} must be a string".format( position)) def __str__(self): return str(binascii.hexlify(self.value)) class X509Certificate(Certificate): """ The X509Certificate class of the simplified KMIP object hierarchy. An X509Certificate is a core KMIP object that is the subject of key management operations. For more information, see Section 2.2 of the KMIP 1.1 specification. Attributes: value: The bytes of the Certificate. cryptographic_usage_masks: The list of usage mask flags for Certificate application. names: The list of string names of the Certificate. """ __tablename__ = 'x509_certificates' unique_identifier = Column('uid', Integer, ForeignKey('certificates.uid'), primary_key=True) __mapper_args__ = { 'polymorphic_identity': 'X509Certificate' } __table_args__ = { 'sqlite_autoincrement': True } def __init__(self, value, masks=None, name='X.509 Certificate'): """ Create an X509Certificate. Args: value(bytes): The bytes representing the certificate. masks(list): A list of CryptographicUsageMask enumerations defining how the certificate will be used. name(string): The string name of the certificate. 
""" super(X509Certificate, self).__init__( enums.CertificateType.X_509, value, masks, name) # All remaining attributes are not considered part of the public API # and are subject to change. # The following attributes are placeholders for attributes that are # unsupported by kmip.core self._x509_certificate_identifier = None self._x509_certificate_subject = None self._x509_certificate_issuer = None self.validate() def __repr__(self): certificate_type = "certificate_type={0}".format(self.certificate_type) value = "value={0}".format(binascii.hexlify(self.value)) return "X509Certificate({0}, {1})".format(certificate_type, value) def __eq__(self, other): if isinstance(other, X509Certificate): if self.value != other.value: return False else: return True else: return NotImplemented def __ne__(self, other): if isinstance(other, X509Certificate): return not (self == other) else: return NotImplemented event.listen(X509Certificate._names, 'append', sql.attribute_append_factory("name_index"), retval=False) class SecretData(CryptographicObject): """ The SecretData class of the simplified KMIP object hierarchy. SecretData is one of several CryptographicObjects and is one of the core KMIP objects that are the subject of key management operations. For more information, see Section 2.2 of the KMIP 1.1 specification. Attributes: cryptographic_usage_masks: A list of usage mask enumerations describing how the CryptographicObject will be used. data_type: The type of the secret value. """ __tablename__ = 'secret_data_objects' unique_identifier = Column('uid', Integer, ForeignKey('crypto_objects.uid'), primary_key=True) data_type = Column('data_type', sql.EnumType(enums.SecretDataType)) __mapper_args__ = { 'polymorphic_identity': 'SecretData' } __table_args__ = { 'sqlite_autoincrement': True } def __init__(self, value, data_type, masks=None, name='Secret Data'): """ Create a SecretData object. Args: value(bytes): The bytes representing secret data. 
data_type(SecretDataType): An enumeration defining the type of the secret value. masks(list): A list of CryptographicUsageMask enumerations defining how the key will be used. name(string): The string name of the key. """ super(SecretData, self).__init__() self._object_type = enums.ObjectType.SECRET_DATA self.value = value self.data_type = data_type self.names = [name] if masks: self.cryptographic_usage_masks = masks # All remaining attributes are not considered part of the public API # and are subject to change. # The following attributes are placeholders for attributes that are # unsupported by kmip.core self.validate() def validate(self): """ Verify that the contents of the SecretData object are valid. Raises: TypeError: if the types of any SecretData attributes are invalid. """ if not isinstance(self.value, bytes): raise TypeError("secret value must be bytes") elif not isinstance(self.data_type, enums.SecretDataType): raise TypeError("secret data type must be a SecretDataType " "enumeration") mask_count = len(self.cryptographic_usage_masks) for i in range(mask_count): mask = self.cryptographic_usage_masks[i] if not isinstance(mask, enums.CryptographicUsageMask): position = "({0} in list)".format(i) raise TypeError( "secret data mask {0} must be a CryptographicUsageMask " "enumeration".format(position)) name_count = len(self.names) for i in range(name_count): name = self.names[i] if not isinstance(name, six.string_types): position = "({0} in list)".format(i) raise TypeError("secret data name {0} must be a string".format( position)) def __repr__(self): value = "value={0}".format(binascii.hexlify(self.value)) data_type = "data_type={0}".format(self.data_type) return "SecretData({0}, {1})".format(value, data_type) def __str__(self): return str(binascii.hexlify(self.value)) def __eq__(self, other): if isinstance(other, SecretData): if self.value != other.value: return False elif self.data_type != other.data_type: return False else: return True else: return 
NotImplemented

    def __ne__(self, other):
        if isinstance(other, SecretData):
            return not (self == other)
        else:
            return NotImplemented


# Keep the name_index column in sync as names are appended (see
# sql.attribute_append_factory).
event.listen(SecretData._names, 'append',
             sql.attribute_append_factory("name_index"), retval=False)


class OpaqueObject(ManagedObject):
    """
    The OpaqueObject class of the simplified KMIP object hierarchy.

    OpaqueObject is one of several ManagedObjects and is one of the core
    KMIP objects that are the subject of key management operations. For more
    information, see Section 2.2 of the KMIP 1.1 specification.

    Attributes:
        opaque_type: The type of the opaque value.
    """

    __tablename__ = 'opaque_objects'
    unique_identifier = Column('uid', Integer,
                               ForeignKey('managed_objects.uid'),
                               primary_key=True)
    opaque_type = Column('opaque_type', sql.EnumType(enums.OpaqueDataType))

    __mapper_args__ = {
        'polymorphic_identity': 'OpaqueData'
    }
    __table_args__ = {
        'sqlite_autoincrement': True
    }

    def __init__(self, value, opaque_type, name='Opaque Object'):
        """
        Create a OpaqueObject.

        Args:
            value(bytes): The bytes representing opaque data.
            opaque_type(OpaqueDataType): An enumeration defining the type of
                the opaque value.
            name(string): The string name of the opaque object.
        """
        super(OpaqueObject, self).__init__()

        self._object_type = enums.ObjectType.OPAQUE_DATA

        self.value = value
        self.opaque_type = opaque_type
        # NOTE(review): appends to the names list rather than assigning
        # [name] as the other classes do — confirm the difference is
        # intentional.
        self.names.append(name)

        # All remaining attributes are not considered part of the public API
        # and are subject to change.
        self._digest = None
        self._revocation_reason = None

        # The following attributes are placeholders for attributes that are
        # unsupported by kmip.core
        self._destroy_date = None
        self._compromise_occurrence_date = None
        self._compromise_date = None

        self.validate()

    def validate(self):
        """
        Verify that the contents of the OpaqueObject are valid.

        Raises:
            TypeError: if the types of any OpaqueObject attributes are
                invalid.
        """
        if not isinstance(self.value, bytes):
            raise TypeError("opaque value must be bytes")
        elif not isinstance(self.opaque_type, enums.OpaqueDataType):
            raise TypeError("opaque data type must be an OpaqueDataType "
                            "enumeration")

        name_count = len(self.names)
        for i in range(name_count):
            name = self.names[i]
            if not isinstance(name, six.string_types):
                position = "({0} in list)".format(i)
                raise TypeError("opaque data name {0} must be a string".format(
                    position))

    def __repr__(self):
        value = "value={0}".format(binascii.hexlify(self.value))
        opaque_type = "opaque_type={0}".format(self.opaque_type)
        return "OpaqueObject({0}, {1})".format(value, opaque_type)

    def __str__(self):
        # Hex representation of the raw opaque bytes.
        return str(binascii.hexlify(self.value))

    def __eq__(self, other):
        if isinstance(other, OpaqueObject):
            if self.value != other.value:
                return False
            elif self.opaque_type != other.opaque_type:
                return False
            else:
                return True
        else:
            return NotImplemented

    def __ne__(self, other):
        if isinstance(other, OpaqueObject):
            return not (self == other)
        else:
            return NotImplemented


# Keep the name_index column in sync as names are appended (see
# sql.attribute_append_factory).
event.listen(OpaqueObject._names, 'append',
             sql.attribute_append_factory("name_index"), retval=False)
kmip/pie/objects.py
from abc import abstractmethod
from sqlalchemy import Column, event, ForeignKey, Integer, String, VARBINARY
from sqlalchemy import Boolean
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.orm import relationship

import binascii
import six

from kmip.core import enums
from kmip.pie import sqltypes as sql


class ManagedObject(sql.Base):
    """
    The abstract base class of the simplified KMIP object hierarchy.

    A ManagedObject is a core KMIP object that is the subject of key
    management operations. It contains various attributes that are common to
    all types of ManagedObjects, including keys, certificates, and various
    types of secret or sensitive data.

    For more information, see Section 2.2 of the KMIP 1.1 specification.

    Attributes:
        value: The value of the ManagedObject. Type varies, usually bytes.
        unique_identifier: The string ID of the ManagedObject.
        names: A list of names associated with the ManagedObject.
        object_type: An enumeration associated with the type of ManagedObject.
    """

    __tablename__ = 'managed_objects'
    unique_identifier = Column('uid', Integer, primary_key=True)
    _object_type = Column('object_type', sql.EnumType(enums.ObjectType))
    # Discriminator column for SQLAlchemy polymorphic loading of the
    # ManagedObject subclass hierarchy (see __mapper_args__ below).
    _class_type = Column('class_type', String(50))
    value = Column('value', VARBINARY(1024))
    # Counter updated by the event.listen(...) hooks registered after each
    # subclass whenever a name is appended.
    name_index = Column(Integer, default=0)
    _names = relationship('ManagedObjectName', back_populates='mo',
                          cascade='all, delete-orphan')
    # Expose the related ManagedObjectName rows as a plain list of strings.
    names = association_proxy('_names', 'name')
    operation_policy_name = Column(
        'operation_policy_name',
        String(50),
        default='default'
    )
    initial_date = Column(Integer, default=0)
    _owner = Column('owner', String(50), default=None)

    __mapper_args__ = {
        'polymorphic_identity': 'ManagedObject',
        'polymorphic_on': _class_type
    }
    __table_args__ = {
        'sqlite_autoincrement': True
    }

    @abstractmethod
    def __init__(self):
        """
        Create a ManagedObject.
        """
        self.value = None

        self.unique_identifier = None
        self.name_index = 0
        self.names = list()
        self.operation_policy_name = None
        self.initial_date = 0
        self._object_type = None
        self._owner = None

        # All remaining attributes are not considered part of the public API
        # and are subject to change.
        self._application_specific_informations = list()
        self._contact_information = None
        self._object_groups = list()

        # The following attributes are placeholders for attributes that are
        # unsupported by kmip.core
        self._archive_date = None
        self._last_change_date = None

    @property
    def object_type(self):
        """
        Accessor and property definition for the object type attribute.

        Returns:
            ObjectType: An ObjectType enumeration that corresponds to the
                class of the object.
        """
        return self._object_type

    @object_type.setter
    def object_type(self, value):
        """
        Set blocker for the object type attribute.

        Raises:
            AttributeError: Always raised to block setting of attribute.
        """
        raise AttributeError("object type cannot be set")

    @abstractmethod
    def validate(self):
        """
        Verify that the contents of the ManagedObject are valid.
        """
        pass

    @abstractmethod
    def __repr__(self):
        pass

    @abstractmethod
    def __str__(self):
        pass

    @abstractmethod
    def __eq__(self, other):
        pass

    @abstractmethod
    def __ne__(self, other):
        pass


class CryptographicObject(ManagedObject):
    """
    The abstract base class of all ManagedObjects related to cryptography.

    A CryptographicObject is a core KMIP object that is the subject of key
    management operations. It contains various attributes that are common to
    all types of CryptographicObjects, including keys and certificates.

    For more information, see Section 2.2 of the KMIP 1.1 specification.

    Attributes:
        cryptographic_usage_masks: A list of usage mask enumerations
            describing how the CryptographicObject will be used.
""" __tablename__ = 'crypto_objects' unique_identifier = Column('uid', Integer, ForeignKey('managed_objects.uid'), primary_key=True) cryptographic_usage_masks = Column('cryptographic_usage_mask', sql.UsageMaskType) state = Column('state', sql.EnumType(enums.State)) __mapper_args__ = { 'polymorphic_identity': 'CryptographicObject' } __table_args__ = { 'sqlite_autoincrement': True } @abstractmethod def __init__(self): """ Create a CryptographicObject. """ super(CryptographicObject, self).__init__() self.cryptographic_usage_masks = list() self.state = enums.State.PRE_ACTIVE # All remaining attributes are not considered part of the public API # and are subject to change. self._digests = list() # The following attributes are placeholders for attributes that are # unsupported by kmip.core self._activation_date = None self._compromise_date = None self._compromise_occurrence_date = None self._deactivation_date = None self._destroy_date = None self._fresh = None self._lease_time = None self._links = list() self._revocation_reason = None class Key(CryptographicObject): """ The abstract base class of all ManagedObjects that are cryptographic keys. A Key is a core KMIP object that is the subject of key management operations. It contains various attributes that are common to all types of Keys, including symmetric and asymmetric keys. For more information, see Section 2.2 of the KMIP 1.1 specification. Attributes: cryptographic_algorithm: A CryptographicAlgorithm enumeration defining the algorithm the key should be used with. cryptographic_length: An int defining the length of the key in bits. key_format_type: A KeyFormatType enumeration defining the format of the key value. key_wrapping_data: A dictionary containing key wrapping data settings, describing how the key value has been wrapped. 
""" __tablename__ = 'keys' unique_identifier = Column('uid', Integer, ForeignKey('crypto_objects.uid'), primary_key=True) cryptographic_algorithm = Column( 'cryptographic_algorithm', sql.EnumType(enums.CryptographicAlgorithm)) cryptographic_length = Column('cryptographic_length', Integer) key_format_type = Column( 'key_format_type', sql.EnumType(enums.KeyFormatType)) # Key wrapping data fields _kdw_wrapping_method = Column( '_kdw_wrapping_method', sql.EnumType(enums.WrappingMethod), default=None ) _kdw_eki_unique_identifier = Column( '_kdw_eki_unique_identifier', String, default=None ) _kdw_eki_cp_block_cipher_mode = Column( '_kdw_eki_cp_block_cipher_mode', sql.EnumType(enums.BlockCipherMode), default=None ) _kdw_eki_cp_padding_method = Column( '_kdw_eki_cp_padding_method', sql.EnumType(enums.PaddingMethod), default=None ) _kdw_eki_cp_hashing_algorithm = Column( '_kdw_eki_cp_hashing_algorithm', sql.EnumType(enums.HashingAlgorithm), default=None ) _kdw_eki_cp_key_role_type = Column( '_kdw_eki_cp_key_role_type', sql.EnumType(enums.KeyRoleType), default=None ) _kdw_eki_cp_digital_signature_algorithm = Column( '_kdw_eki_cp_digital_signature_algorithm', sql.EnumType(enums.DigitalSignatureAlgorithm), default=None ) _kdw_eki_cp_cryptographic_algorithm = Column( '_kdw_eki_cp_cryptographic_algorithm', sql.EnumType(enums.CryptographicAlgorithm), default=None ) _kdw_eki_cp_random_iv = Column( '_kdw_eki_cp_random_iv', Boolean, default=None ) _kdw_eki_cp_iv_length = Column( '_kdw_eki_cp_iv_length', Integer, default=None ) _kdw_eki_cp_tag_length = Column( '_kdw_eki_cp_tag_length', Integer, default=None ) _kdw_eki_cp_fixed_field_length = Column( '_kdw_eki_cp_fixed_field_length', Integer, default=None ) _kdw_eki_cp_invocation_field_length = Column( '_kdw_eki_cp_invocation_field_length', Integer ) _kdw_eki_cp_counter_length = Column( '_kdw_eki_cp_counter_length', Integer, default=None ) _kdw_eki_cp_initial_counter_value = Column( '_kdw_eki_cp_initial_counter_value', Integer, 
default=None ) _kdw_mski_unique_identifier = Column( '_kdw_mski_unique_identifier', String, default=None ) _kdw_mski_cp_block_cipher_mode = Column( '_kdw_mski_cp_block_cipher_mode', sql.EnumType(enums.BlockCipherMode), default=None ) _kdw_mski_cp_padding_method = Column( '_kdw_mski_cp_padding_method', sql.EnumType(enums.PaddingMethod), default=None ) _kdw_mski_cp_hashing_algorithm = Column( '_kdw_mski_cp_hashing_algorithm', sql.EnumType(enums.HashingAlgorithm), default=None ) _kdw_mski_cp_key_role_type = Column( '_kdw_mski_cp_key_role_type', sql.EnumType(enums.KeyRoleType), default=None ) _kdw_mski_cp_digital_signature_algorithm = Column( '_kdw_mski_cp_digital_signature_algorithm', sql.EnumType(enums.DigitalSignatureAlgorithm), default=None ) _kdw_mski_cp_cryptographic_algorithm = Column( '_kdw_mski_cp_cryptographic_algorithm', sql.EnumType(enums.CryptographicAlgorithm), default=None ) _kdw_mski_cp_random_iv = Column( '_kdw_mski_cp_random_iv', Boolean, default=None ) _kdw_mski_cp_iv_length = Column( '_kdw_mski_cp_iv_length', Integer, default=None ) _kdw_mski_cp_tag_length = Column( '_kdw_mski_cp_tag_length', Integer, default=None ) _kdw_mski_cp_fixed_field_length = Column( '_kdw_mski_cp_fixed_field_length', Integer, default=None ) _kdw_mski_cp_invocation_field_length = Column( '_kdw_mski_cp_invocation_field_length', Integer, default=None ) _kdw_mski_cp_counter_length = Column( '_kdw_mski_cp_counter_length', Integer, default=None ) _kdw_mski_cp_initial_counter_value = Column( '_kdw_mski_cp_initial_counter_value', Integer, default=None ) _kdw_mac_signature = Column( '_kdw_mac_signature', VARBINARY(1024), default=None ) _kdw_iv_counter_nonce = Column( '_kdw_iv_counter_nonce', VARBINARY(1024), default=None ) _kdw_encoding_option = Column( '_kdw_encoding_option', sql.EnumType(enums.EncodingOption), default=None ) __mapper_args__ = { 'polymorphic_identity': 'Key' } __table_args__ = { 'sqlite_autoincrement': True } @abstractmethod def __init__(self, 
                 key_wrapping_data=None):
        """
        Create a Key object.

        Args:
            key_wrapping_data(dict): A dictionary containing key wrapping
                data settings, describing how the key value has been wrapped.
                Optional, defaults to None.
        """
        super(Key, self).__init__()

        self.cryptographic_algorithm = None
        self.cryptographic_length = None
        self.key_format_type = None
        # Routed through the key_wrapping_data setter below, which unpacks
        # the dictionary into the flattened _kdw_* columns.
        self.key_wrapping_data = key_wrapping_data

        # All remaining attributes are not considered part of the public API
        # and are subject to change.
        self._cryptographic_parameters = list()

        # The following attributes are placeholders for attributes that are
        # unsupported by kmip.core
        self._usage_limits = None

    @property
    def key_wrapping_data(self):
        """
        Retrieve all of the relevant key wrapping data fields and return them
        as a dictionary.
        """
        key_wrapping_data = {}

        encryption_key_info = {
            'unique_identifier': self._kdw_eki_unique_identifier,
            'cryptographic_parameters': {
                'block_cipher_mode': self._kdw_eki_cp_block_cipher_mode,
                'padding_method': self._kdw_eki_cp_padding_method,
                'hashing_algorithm': self._kdw_eki_cp_hashing_algorithm,
                'key_role_type': self._kdw_eki_cp_key_role_type,
                'digital_signature_algorithm':
                    self._kdw_eki_cp_digital_signature_algorithm,
                'cryptographic_algorithm':
                    self._kdw_eki_cp_cryptographic_algorithm,
                'random_iv': self._kdw_eki_cp_random_iv,
                'iv_length': self._kdw_eki_cp_iv_length,
                'tag_length': self._kdw_eki_cp_tag_length,
                'fixed_field_length': self._kdw_eki_cp_fixed_field_length,
                'invocation_field_length':
                    self._kdw_eki_cp_invocation_field_length,
                'counter_length': self._kdw_eki_cp_counter_length,
                'initial_counter_value':
                    self._kdw_eki_cp_initial_counter_value
            }
        }
        # Collapse all-empty sub-dictionaries to {} so that callers can
        # simply truth-test the result.
        if not any(encryption_key_info['cryptographic_parameters'].values()):
            encryption_key_info['cryptographic_parameters'] = {}
        if not any(encryption_key_info.values()):
            encryption_key_info = {}

        mac_sign_key_info = {
            'unique_identifier': self._kdw_mski_unique_identifier,
            'cryptographic_parameters': {
                'block_cipher_mode': self._kdw_mski_cp_block_cipher_mode,
                'padding_method': self._kdw_mski_cp_padding_method,
                'hashing_algorithm': self._kdw_mski_cp_hashing_algorithm,
                'key_role_type': self._kdw_mski_cp_key_role_type,
                'digital_signature_algorithm':
                    self._kdw_mski_cp_digital_signature_algorithm,
                'cryptographic_algorithm':
                    self._kdw_mski_cp_cryptographic_algorithm,
                'random_iv': self._kdw_mski_cp_random_iv,
                'iv_length': self._kdw_mski_cp_iv_length,
                'tag_length': self._kdw_mski_cp_tag_length,
                'fixed_field_length': self._kdw_mski_cp_fixed_field_length,
                'invocation_field_length':
                    self._kdw_mski_cp_invocation_field_length,
                'counter_length': self._kdw_mski_cp_counter_length,
                'initial_counter_value':
                    self._kdw_mski_cp_initial_counter_value
            }
        }
        # Same collapsing treatment for the MAC/signature key information.
        if not any(mac_sign_key_info['cryptographic_parameters'].values()):
            mac_sign_key_info['cryptographic_parameters'] = {}
        if not any(mac_sign_key_info.values()):
            mac_sign_key_info = {}

        key_wrapping_data['wrapping_method'] = self._kdw_wrapping_method
        key_wrapping_data['encryption_key_information'] = encryption_key_info
        key_wrapping_data['mac_signature_key_information'] = mac_sign_key_info
        key_wrapping_data['mac_signature'] = self._kdw_mac_signature
        key_wrapping_data['iv_counter_nonce'] = self._kdw_iv_counter_nonce
        key_wrapping_data['encoding_option'] = self._kdw_encoding_option
        # An entirely unset wrapping structure is reported as {}.
        if not any(key_wrapping_data.values()):
            key_wrapping_data = {}

        return key_wrapping_data

    @key_wrapping_data.setter
    def key_wrapping_data(self, value):
        """
        Set the key wrapping data attributes using a dictionary.
""" if value is None: value = {} elif not isinstance(value, dict): raise TypeError("Key wrapping data must be a dictionary.") self._kdw_wrapping_method = value.get('wrapping_method') eki = value.get('encryption_key_information') if eki is None: eki = {} self._kdw_eki_unique_identifier = eki.get('unique_identifier') eki_cp = eki.get('cryptographic_parameters') if eki_cp is None: eki_cp = {} self._kdw_eki_cp_block_cipher_mode = eki_cp.get('block_cipher_mode') self._kdw_eki_cp_padding_method = eki_cp.get('padding_method') self._kdw_eki_cp_hashing_algorithm = eki_cp.get('hashing_algorithm') self._kdw_eki_cp_key_role_type = eki_cp.get('key_role_type') self._kdw_eki_cp_digital_signature_algorithm = \ eki_cp.get('digital_signature_algorithm') self._kdw_eki_cp_cryptographic_algorithm = \ eki_cp.get('cryptographic_algorithm') self._kdw_eki_cp_random_iv = eki_cp.get('random_iv') self._kdw_eki_cp_iv_length = eki_cp.get('iv_length') self._kdw_eki_cp_tag_length = eki_cp.get('tag_length') self._kdw_eki_cp_fixed_field_length = eki_cp.get('fixed_field_length') self._kdw_eki_cp_invocation_field_length = \ eki_cp.get('invocation_field_length') self._kdw_eki_cp_counter_length = eki_cp.get('counter_length') self._kdw_eki_cp_initial_counter_value = \ eki_cp.get('initial_counter_value') mski = value.get('mac_signature_key_information') if mski is None: mski = {} self._kdw_mski_unique_identifier = mski.get('unique_identifier') mski_cp = mski.get('cryptographic_parameters') if mski_cp is None: mski_cp = {} self._kdw_mski_cp_block_cipher_mode = mski_cp.get('block_cipher_mode') self._kdw_mski_cp_padding_method = mski_cp.get('padding_method') self._kdw_mski_cp_hashing_algorithm = mski_cp.get('hashing_algorithm') self._kdw_mski_cp_key_role_type = mski_cp.get('key_role_type') self._kdw_mski_cp_digital_signature_algorithm = \ mski_cp.get('digital_signature_algorithm') self._kdw_mski_cp_cryptographic_algorithm = \ mski_cp.get('cryptographic_algorithm') self._kdw_mski_cp_random_iv = 
mski_cp.get('random_iv') self._kdw_mski_cp_iv_length = mski_cp.get('iv_length') self._kdw_mski_cp_tag_length = mski_cp.get('tag_length') self._kdw_mski_cp_fixed_field_length = \ mski_cp.get('fixed_field_length') self._kdw_mski_cp_invocation_field_length = \ mski_cp.get('invocation_field_length') self._kdw_mski_cp_counter_length = mski_cp.get('counter_length') self._kdw_mski_cp_initial_counter_value = \ mski_cp.get('initial_counter_value') self._kdw_mac_signature = value.get('mac_signature') self._kdw_iv_counter_nonce = value.get('iv_counter_nonce') self._kdw_encoding_option = value.get('encoding_option') class SymmetricKey(Key): """ The SymmetricKey class of the simplified KMIP object hierarchy. A SymmetricKey is a core KMIP object that is the subject of key management operations. For more information, see Section 2.2 of the KMIP 1.1 specification. Attributes: cryptographic_algorithm: The type of algorithm for the SymmetricKey. cryptographic_length: The length in bits of the SymmetricKey value. value: The bytes of the SymmetricKey. key_format_type: The format of the key value. cryptographic_usage_masks: The list of usage mask flags for SymmetricKey application. names: The string names of the SymmetricKey. key_wrapping_data: A dictionary containing key wrapping data settings, describing how the key value has been wrapped. """ __tablename__ = 'symmetric_keys' unique_identifier = Column('uid', Integer, ForeignKey('keys.uid'), primary_key=True) __mapper_args__ = { 'polymorphic_identity': 'SymmetricKey' } __table_args__ = { 'sqlite_autoincrement': True } def __init__(self, algorithm, length, value, masks=None, name='Symmetric Key', key_wrapping_data=None): """ Create a SymmetricKey. Args: algorithm(CryptographicAlgorithm): An enumeration identifying the type of algorithm for the key. length(int): The length in bits of the key. value(bytes): The bytes representing the key. masks(list): A list of CryptographicUsageMask enumerations defining how the key will be used. 
Optional, defaults to None. name(string): The string name of the key. Optional, defaults to 'Symmetric Key'. key_wrapping_data(dict): A dictionary containing key wrapping data settings, describing how the key value has been wrapped. Optional, defaults to None. """ super(SymmetricKey, self).__init__( key_wrapping_data=key_wrapping_data ) self._object_type = enums.ObjectType.SYMMETRIC_KEY self.key_format_type = enums.KeyFormatType.RAW self.value = value self.cryptographic_algorithm = algorithm self.cryptographic_length = length self.names = [name] if masks: self.cryptographic_usage_masks.extend(masks) # All remaining attributes are not considered part of the public API # and are subject to change. # The following attributes are placeholders for attributes that are # unsupported by kmip.core self._process_start_date = None self._protect_stop_date = None self.validate() def validate(self): """ Verify that the contents of the SymmetricKey object are valid. Raises: TypeError: if the types of any SymmetricKey attributes are invalid ValueError: if the key length and key value length do not match """ if not isinstance(self.value, bytes): raise TypeError("key value must be bytes") elif not isinstance(self.cryptographic_algorithm, enums.CryptographicAlgorithm): raise TypeError("key algorithm must be a CryptographicAlgorithm " "enumeration") elif not isinstance(self.cryptographic_length, six.integer_types): raise TypeError("key length must be an integer") mask_count = len(self.cryptographic_usage_masks) for i in range(mask_count): mask = self.cryptographic_usage_masks[i] if not isinstance(mask, enums.CryptographicUsageMask): position = "({0} in list)".format(i) raise TypeError( "key mask {0} must be a CryptographicUsageMask " "enumeration".format(position)) name_count = len(self.names) for i in range(name_count): name = self.names[i] if not isinstance(name, six.string_types): position = "({0} in list)".format(i) raise TypeError("key name {0} must be a string".format( 
position)) if not self.key_wrapping_data: if (len(self.value) * 8) != self.cryptographic_length: msg = "key length ({0}) not equal to key value length ({1})" msg = msg.format( self.cryptographic_length, len(self.value) * 8 ) raise ValueError(msg) def __repr__(self): algorithm = "algorithm={0}".format(self.cryptographic_algorithm) length = "length={0}".format(self.cryptographic_length) value = "value={0}".format(binascii.hexlify(self.value)) key_wrapping_data = "key_wrapping_data={0}".format( self.key_wrapping_data ) return "SymmetricKey({0}, {1}, {2}, {3})".format( algorithm, length, value, key_wrapping_data ) def __str__(self): return str(binascii.hexlify(self.value)) def __eq__(self, other): if isinstance(other, SymmetricKey): if self.value != other.value: return False elif self.cryptographic_algorithm != other.cryptographic_algorithm: return False elif self.cryptographic_length != other.cryptographic_length: return False elif self.key_wrapping_data != other.key_wrapping_data: return False else: return True else: return NotImplemented def __ne__(self, other): if isinstance(other, SymmetricKey): return not (self == other) else: return NotImplemented event.listen(SymmetricKey._names, 'append', sql.attribute_append_factory("name_index"), retval=False) class PublicKey(Key): """ The PublicKey class of the simplified KMIP object hierarchy. A PublicKey is a core KMIP object that is the subject of key management operations. For more information, see Section 2.2 of the KMIP 1.1 specification. Attributes: cryptographic_algorithm: The type of algorithm for the PublicKey. cryptographic_length: The length in bits of the PublicKey. value: The bytes of the PublicKey. key_format_type: The format of the key value. cryptographic_usage_masks: The list of usage mask flags for PublicKey application. names: The list of string names of the PublicKey. key_wrapping_data(dict): A dictionary containing key wrapping data settings, describing how the key value has been wrapped. 
""" __tablename__ = 'public_keys' unique_identifier = Column('uid', Integer, ForeignKey('keys.uid'), primary_key=True) __mapper_args__ = { 'polymorphic_identity': 'PublicKey' } __table_args__ = { 'sqlite_autoincrement': True } def __init__(self, algorithm, length, value, format_type=enums.KeyFormatType.X_509, masks=None, name='Public Key', key_wrapping_data=None): """ Create a PublicKey. Args: algorithm(CryptographicAlgorithm): An enumeration identifying the type of algorithm for the key. length(int): The length in bits of the key. value(bytes): The bytes representing the key. format_type(KeyFormatType): An enumeration defining the format of the key value. Optional, defaults to enums.KeyFormatType.X_509. masks(list): A list of CryptographicUsageMask enumerations defining how the key will be used. Optional, defaults to None. name(string): The string name of the key. Optional, defaults to 'Public Key'. key_wrapping_data(dict): A dictionary containing key wrapping data settings, describing how the key value has been wrapped. Optional, defaults to None. """ super(PublicKey, self).__init__( key_wrapping_data=key_wrapping_data ) self._object_type = enums.ObjectType.PUBLIC_KEY self._valid_formats = [ enums.KeyFormatType.RAW, enums.KeyFormatType.X_509, enums.KeyFormatType.PKCS_1] self.value = value self.cryptographic_algorithm = algorithm self.cryptographic_length = length self.key_format_type = format_type self.names = [name] if masks: self.cryptographic_usage_masks = masks # All remaining attributes are not considered part of the public API # and are subject to change. # The following attributes are placeholders for attributes that are # unsupported by kmip.core self._cryptographic_domain_parameters = list() self.validate() def validate(self): """ Verify that the contents of the PublicKey object are valid. Raises: TypeError: if the types of any PublicKey attributes are invalid. 
""" if not isinstance(self.value, bytes): raise TypeError("key value must be bytes") elif not isinstance(self.cryptographic_algorithm, enums.CryptographicAlgorithm): raise TypeError("key algorithm must be a CryptographicAlgorithm " "enumeration") elif not isinstance(self.cryptographic_length, six.integer_types): raise TypeError("key length must be an integer") elif not isinstance(self.key_format_type, enums.KeyFormatType): raise TypeError("key format type must be a KeyFormatType " "enumeration") elif self.key_format_type not in self._valid_formats: raise ValueError("key format type must be one of {0}".format( self._valid_formats)) # TODO (peter-hamilton) Verify that the key bytes match the key format mask_count = len(self.cryptographic_usage_masks) for i in range(mask_count): mask = self.cryptographic_usage_masks[i] if not isinstance(mask, enums.CryptographicUsageMask): position = "({0} in list)".format(i) raise TypeError( "key mask {0} must be a CryptographicUsageMask " "enumeration".format(position)) name_count = len(self.names) for i in range(name_count): name = self.names[i] if not isinstance(name, six.string_types): position = "({0} in list)".format(i) raise TypeError("key name {0} must be a string".format( position)) def __repr__(self): algorithm = "algorithm={0}".format(self.cryptographic_algorithm) length = "length={0}".format(self.cryptographic_length) value = "value={0}".format(binascii.hexlify(self.value)) format_type = "format_type={0}".format(self.key_format_type) key_wrapping_data = "key_wrapping_data={0}".format( self.key_wrapping_data ) return "PublicKey({0}, {1}, {2}, {3}, {4})".format( algorithm, length, value, format_type, key_wrapping_data) def __str__(self): return str(binascii.hexlify(self.value)) def __eq__(self, other): if isinstance(other, PublicKey): if self.value != other.value: return False elif self.key_format_type != other.key_format_type: return False elif self.cryptographic_algorithm != other.cryptographic_algorithm: return False 
elif self.cryptographic_length != other.cryptographic_length: return False elif self.key_wrapping_data != other.key_wrapping_data: return False else: return True else: return NotImplemented def __ne__(self, other): if isinstance(other, PublicKey): return not (self == other) else: return NotImplemented event.listen(PublicKey._names, 'append', sql.attribute_append_factory("name_index"), retval=False) class PrivateKey(Key): """ The PrivateKey class of the simplified KMIP object hierarchy. A PrivateKey is a core KMIP object that is the subject of key management operations. For more information, see Section 2.2 of the KMIP 1.1 specification. Attributes: cryptographic_algorithm: The type of algorithm for the PrivateKey. cryptographic_length: The length in bits of the PrivateKey. value: The bytes of the PrivateKey. key_format_type: The format of the key value. cryptographic_usage_masks: The list of usage mask flags for PrivateKey application. Optional, defaults to None. names: The list of string names of the PrivateKey. Optional, defaults to 'Private Key'. key_wrapping_data(dict): A dictionary containing key wrapping data settings, describing how the key value has been wrapped. """ __tablename__ = 'private_keys' unique_identifier = Column('uid', Integer, ForeignKey('keys.uid'), primary_key=True) __mapper_args__ = { 'polymorphic_identity': 'PrivateKey' } __table_args__ = { 'sqlite_autoincrement': True } def __init__(self, algorithm, length, value, format_type, masks=None, name='Private Key', key_wrapping_data=None): """ Create a PrivateKey. Args: algorithm(CryptographicAlgorithm): An enumeration identifying the type of algorithm for the key. length(int): The length in bits of the key. value(bytes): The bytes representing the key. format_type(KeyFormatType): An enumeration defining the format of the key value. masks(list): A list of CryptographicUsageMask enumerations defining how the key will be used. name(string): The string name of the key. 
key_wrapping_data(dict): A dictionary containing key wrapping data settings, describing how the key value has been wrapped. Optional, defaults to None. """ super(PrivateKey, self).__init__( key_wrapping_data=key_wrapping_data ) self._object_type = enums.ObjectType.PRIVATE_KEY self._valid_formats = [ enums.KeyFormatType.RAW, enums.KeyFormatType.PKCS_1, enums.KeyFormatType.PKCS_8] self.value = value self.cryptographic_algorithm = algorithm self.cryptographic_length = length self.key_format_type = format_type self.names = [name] if masks: self.cryptographic_usage_masks = masks # All remaining attributes are not considered part of the public API # and are subject to change. # The following attributes are placeholders for attributes that are # unsupported by kmip.core self._cryptographic_domain_parameters = list() self.validate() def validate(self): """ Verify that the contents of the PrivateKey object are valid. Raises: TypeError: if the types of any PrivateKey attributes are invalid. """ if not isinstance(self.value, bytes): raise TypeError("key value must be bytes") elif not isinstance(self.cryptographic_algorithm, enums.CryptographicAlgorithm): raise TypeError("key algorithm must be a CryptographicAlgorithm " "enumeration") elif not isinstance(self.cryptographic_length, six.integer_types): raise TypeError("key length must be an integer") elif not isinstance(self.key_format_type, enums.KeyFormatType): raise TypeError("key format type must be a KeyFormatType " "enumeration") elif self.key_format_type not in self._valid_formats: raise ValueError("key format type must be one of {0}".format( self._valid_formats)) # TODO (peter-hamilton) Verify that the key bytes match the key format mask_count = len(self.cryptographic_usage_masks) for i in range(mask_count): mask = self.cryptographic_usage_masks[i] if not isinstance(mask, enums.CryptographicUsageMask): position = "({0} in list)".format(i) raise TypeError( "key mask {0} must be a CryptographicUsageMask " 
"enumeration".format(position)) name_count = len(self.names) for i in range(name_count): name = self.names[i] if not isinstance(name, six.string_types): position = "({0} in list)".format(i) raise TypeError("key name {0} must be a string".format( position)) def __repr__(self): algorithm = "algorithm={0}".format(self.cryptographic_algorithm) length = "length={0}".format(self.cryptographic_length) value = "value={0}".format(binascii.hexlify(self.value)) format_type = "format_type={0}".format(self.key_format_type) key_wrapping_data = "key_wrapping_data={0}".format( self.key_wrapping_data ) return "PrivateKey({0}, {1}, {2}, {3}, {4})".format( algorithm, length, value, format_type, key_wrapping_data) def __str__(self): return str(binascii.hexlify(self.value)) def __eq__(self, other): if isinstance(other, PrivateKey): if self.value != other.value: return False elif self.key_format_type != other.key_format_type: return False elif self.cryptographic_algorithm != other.cryptographic_algorithm: return False elif self.cryptographic_length != other.cryptographic_length: return False elif self.key_wrapping_data != other.key_wrapping_data: return False else: return True else: return NotImplemented def __ne__(self, other): if isinstance(other, PrivateKey): return not (self == other) else: return NotImplemented event.listen(PrivateKey._names, 'append', sql.attribute_append_factory("name_index"), retval=False) class Certificate(CryptographicObject): """ The Certificate class of the simplified KMIP object hierarchy. A Certificate is a core KMIP object that is the subject of key management operations. For more information, see Section 2.2 of the KMIP 1.1 specification. Attributes: certificate_type: The type of the Certificate. value: The bytes of the Certificate. cryptographic_usage_masks: The list of usage mask flags for Certificate application. names: The list of string names of the Certificate. 
""" __tablename__ = 'certificates' unique_identifier = Column('uid', Integer, ForeignKey('crypto_objects.uid'), primary_key=True) certificate_type = Column( 'certificate_type', sql.EnumType(enums.CertificateType)) __mapper_args__ = { 'polymorphic_identity': 'Certificate' } __table_args__ = { 'sqlite_autoincrement': True } @abstractmethod def __init__(self, certificate_type, value, masks=None, name='Certificate'): """ Create a Certificate. Args: certificate_type(CertificateType): An enumeration defining the type of the certificate. value(bytes): The bytes representing the certificate. masks(list): A list of CryptographicUsageMask enumerations defining how the certificate will be used. name(string): The string name of the certificate. """ super(Certificate, self).__init__() self._object_type = enums.ObjectType.CERTIFICATE self.value = value self.certificate_type = certificate_type self.names = [name] if masks: self.cryptographic_usage_masks = masks # All remaining attributes are not considered part of the public API # and are subject to change. self._cryptographic_algorithm = None self._cryptographic_length = None self._certificate_length = None # The following attributes are placeholders for attributes that are # unsupported by kmip.core self._cryptographic_parameters = list() self._digital_signature_algorithm = list() self.validate() def validate(self): """ Verify that the contents of the Certificate object are valid. Raises: TypeError: if the types of any Certificate attributes are invalid. 
""" if not isinstance(self.value, bytes): raise TypeError("certificate value must be bytes") elif not isinstance(self.certificate_type, enums.CertificateType): raise TypeError("certificate type must be a CertificateType " "enumeration") mask_count = len(self.cryptographic_usage_masks) for i in range(mask_count): mask = self.cryptographic_usage_masks[i] if not isinstance(mask, enums.CryptographicUsageMask): position = "({0} in list)".format(i) raise TypeError( "certificate mask {0} must be a CryptographicUsageMask " "enumeration".format(position)) name_count = len(self.names) for i in range(name_count): name = self.names[i] if not isinstance(name, six.string_types): position = "({0} in list)".format(i) raise TypeError("certificate name {0} must be a string".format( position)) def __str__(self): return str(binascii.hexlify(self.value)) class X509Certificate(Certificate): """ The X509Certificate class of the simplified KMIP object hierarchy. An X509Certificate is a core KMIP object that is the subject of key management operations. For more information, see Section 2.2 of the KMIP 1.1 specification. Attributes: value: The bytes of the Certificate. cryptographic_usage_masks: The list of usage mask flags for Certificate application. names: The list of string names of the Certificate. """ __tablename__ = 'x509_certificates' unique_identifier = Column('uid', Integer, ForeignKey('certificates.uid'), primary_key=True) __mapper_args__ = { 'polymorphic_identity': 'X509Certificate' } __table_args__ = { 'sqlite_autoincrement': True } def __init__(self, value, masks=None, name='X.509 Certificate'): """ Create an X509Certificate. Args: value(bytes): The bytes representing the certificate. masks(list): A list of CryptographicUsageMask enumerations defining how the certificate will be used. name(string): The string name of the certificate. 
""" super(X509Certificate, self).__init__( enums.CertificateType.X_509, value, masks, name) # All remaining attributes are not considered part of the public API # and are subject to change. # The following attributes are placeholders for attributes that are # unsupported by kmip.core self._x509_certificate_identifier = None self._x509_certificate_subject = None self._x509_certificate_issuer = None self.validate() def __repr__(self): certificate_type = "certificate_type={0}".format(self.certificate_type) value = "value={0}".format(binascii.hexlify(self.value)) return "X509Certificate({0}, {1})".format(certificate_type, value) def __eq__(self, other): if isinstance(other, X509Certificate): if self.value != other.value: return False else: return True else: return NotImplemented def __ne__(self, other): if isinstance(other, X509Certificate): return not (self == other) else: return NotImplemented event.listen(X509Certificate._names, 'append', sql.attribute_append_factory("name_index"), retval=False) class SecretData(CryptographicObject): """ The SecretData class of the simplified KMIP object hierarchy. SecretData is one of several CryptographicObjects and is one of the core KMIP objects that are the subject of key management operations. For more information, see Section 2.2 of the KMIP 1.1 specification. Attributes: cryptographic_usage_masks: A list of usage mask enumerations describing how the CryptographicObject will be used. data_type: The type of the secret value. """ __tablename__ = 'secret_data_objects' unique_identifier = Column('uid', Integer, ForeignKey('crypto_objects.uid'), primary_key=True) data_type = Column('data_type', sql.EnumType(enums.SecretDataType)) __mapper_args__ = { 'polymorphic_identity': 'SecretData' } __table_args__ = { 'sqlite_autoincrement': True } def __init__(self, value, data_type, masks=None, name='Secret Data'): """ Create a SecretData object. Args: value(bytes): The bytes representing secret data. 
data_type(SecretDataType): An enumeration defining the type of the secret value. masks(list): A list of CryptographicUsageMask enumerations defining how the key will be used. name(string): The string name of the key. """ super(SecretData, self).__init__() self._object_type = enums.ObjectType.SECRET_DATA self.value = value self.data_type = data_type self.names = [name] if masks: self.cryptographic_usage_masks = masks # All remaining attributes are not considered part of the public API # and are subject to change. # The following attributes are placeholders for attributes that are # unsupported by kmip.core self.validate() def validate(self): """ Verify that the contents of the SecretData object are valid. Raises: TypeError: if the types of any SecretData attributes are invalid. """ if not isinstance(self.value, bytes): raise TypeError("secret value must be bytes") elif not isinstance(self.data_type, enums.SecretDataType): raise TypeError("secret data type must be a SecretDataType " "enumeration") mask_count = len(self.cryptographic_usage_masks) for i in range(mask_count): mask = self.cryptographic_usage_masks[i] if not isinstance(mask, enums.CryptographicUsageMask): position = "({0} in list)".format(i) raise TypeError( "secret data mask {0} must be a CryptographicUsageMask " "enumeration".format(position)) name_count = len(self.names) for i in range(name_count): name = self.names[i] if not isinstance(name, six.string_types): position = "({0} in list)".format(i) raise TypeError("secret data name {0} must be a string".format( position)) def __repr__(self): value = "value={0}".format(binascii.hexlify(self.value)) data_type = "data_type={0}".format(self.data_type) return "SecretData({0}, {1})".format(value, data_type) def __str__(self): return str(binascii.hexlify(self.value)) def __eq__(self, other): if isinstance(other, SecretData): if self.value != other.value: return False elif self.data_type != other.data_type: return False else: return True else: return 
NotImplemented def __ne__(self, other): if isinstance(other, SecretData): return not (self == other) else: return NotImplemented event.listen(SecretData._names, 'append', sql.attribute_append_factory("name_index"), retval=False) class OpaqueObject(ManagedObject): """ The OpaqueObject class of the simplified KMIP object hierarchy. OpaqueObject is one of several ManagedObjects and is one of the core KMIP objects that are the subject of key management operations. For more information, see Section 2.2 of the KMIP 1.1 specification. Attributes: opaque_type: The type of the opaque value. """ __tablename__ = 'opaque_objects' unique_identifier = Column('uid', Integer, ForeignKey('managed_objects.uid'), primary_key=True) opaque_type = Column('opaque_type', sql.EnumType(enums.OpaqueDataType)) __mapper_args__ = { 'polymorphic_identity': 'OpaqueData' } __table_args__ = { 'sqlite_autoincrement': True } def __init__(self, value, opaque_type, name='Opaque Object'): """ Create a OpaqueObject. Args: value(bytes): The bytes representing opaque data. opaque_type(OpaqueDataType): An enumeration defining the type of the opaque value. name(string): The string name of the opaque object. """ super(OpaqueObject, self).__init__() self._object_type = enums.ObjectType.OPAQUE_DATA self.value = value self.opaque_type = opaque_type self.names.append(name) # All remaining attributes are not considered part of the public API # and are subject to change. self._digest = None self._revocation_reason = None # The following attributes are placeholders for attributes that are # unsupported by kmip.core self._destroy_date = None self._compromise_occurrence_date = None self._compromise_date = None self.validate() def validate(self): """ Verify that the contents of the OpaqueObject are valid. Raises: TypeError: if the types of any OpaqueObject attributes are invalid. 
""" if not isinstance(self.value, bytes): raise TypeError("opaque value must be bytes") elif not isinstance(self.opaque_type, enums.OpaqueDataType): raise TypeError("opaque data type must be an OpaqueDataType " "enumeration") name_count = len(self.names) for i in range(name_count): name = self.names[i] if not isinstance(name, six.string_types): position = "({0} in list)".format(i) raise TypeError("opaque data name {0} must be a string".format( position)) def __repr__(self): value = "value={0}".format(binascii.hexlify(self.value)) opaque_type = "opaque_type={0}".format(self.opaque_type) return "OpaqueObject({0}, {1})".format(value, opaque_type) def __str__(self): return str(binascii.hexlify(self.value)) def __eq__(self, other): if isinstance(other, OpaqueObject): if self.value != other.value: return False elif self.opaque_type != other.opaque_type: return False else: return True else: return NotImplemented def __ne__(self, other): if isinstance(other, OpaqueObject): return not (self == other) else: return NotImplemented event.listen(OpaqueObject._names, 'append', sql.attribute_append_factory("name_index"), retval=False)
0.866048
0.347842
from bigml.tree_utils import INDENT from bigml.generators.model import PYTHON_OPERATOR, missing_branch, \ none_value from bigml.predict_utils.common import mintree_split, get_predicate, get_node from bigml.predict_utils.common import OPERATION_OFFSET, FIELD_OFFSET, \ VALUE_OFFSET, MISSING_OFFSET from bigml.generators.tree_common import filter_nodes T_MISSING_OPERATOR = { "=": "ISNULL(", "!=": "NOT ISNULL(" } # Map operator str to its corresponding mysql operator MYSQL_OPERATOR = { "/=": "!="} def value_to_print(value, optype): """String of code that represents a value according to its type """ if value is None: return "NULL" if optype == 'numeric': return value return "'%s'" % value.replace("'", '\\\'') def missing_check_code(tree, offsets, fields, objective_id, field, alternate, cmv, attr=None): """Builds the code to predict when the field is missing """ node = get_node(tree) condition = "ISNULL(`%s`)" % fields[field]['name'] code = ("%s (%s)" % (alternate, condition)) # used when printing the confidence metric if attr is not None: value = node[offsets[attr]] else: value = value_to_print( \ node[offsets["output"]], fields[objective_id]['optype']) code += (", %s" % value) cmv.append(fields[field]['name']) return code def missing_prefix_code(tree, fields, field, cmv): """Part of the condition that checks for missings when missing_splits has been used """ predicate = get_predicate(tree) missing = predicate[MISSING_OFFSET] negation = "" if missing else "NOT " connection = "OR" if missing else "AND" if not missing: cmv.append(fields[field]['name']) return "(%sISNULL(`%s`) %s " % ( \ negation, fields[field]['name'], connection) def split_condition_code(tree, fields, field, alternate, pre_condition): """Condition code for the split """ predicate = get_predicate(tree) post_condition = "" optype = fields[field]['optype'] value = value_to_print(predicate[VALUE_OFFSET], optype) operation = predicate[OPERATION_OFFSET] operator = ("" if predicate[VALUE_OFFSET] is None else 
MYSQL_OPERATOR.get(operation, PYTHON_OPERATOR.get(operation))) if predicate[VALUE_OFFSET] is None: value = "" pre_condition = ( T_MISSING_OPERATOR[operation]) post_condition = ")" condition = "%s`%s`%s%s%s" % ( \ pre_condition, fields[predicate[FIELD_OFFSET]]['name'], operator, value, post_condition) return "%s (%s)" % (alternate, condition) def plug_in_body(tree, offsets, fields, objective_id, depth=0, cmv=None, ids_path=None, subtree=True, body="", attr=None): """Translate the model into a mysql function `depth` controls the size of indentation. As soon as a value is missing that node is returned without further evaluation. `attr` is used to decide the value returned by the function. When it's set to None, the prediction is returned. When set to the name of an attribute (e.g. 'confidence') this attribute is returned """ if cmv is None: cmv = [] if body: alternate = ",\n%sIF (" % (depth * INDENT) else: alternate = "IF (" post_missing_body = "" node = get_node(tree) children = filter_nodes([] if node[offsets["children#"]] == 0 \ else node[offsets["children"]], offsets, ids=ids_path, subtree=subtree) if children: # field used in the split field = mintree_split(children) has_missing_branch = (missing_branch(children) or none_value(children)) # the missing is singled out as a special case only when there's # no missing branch in the children list if (not has_missing_branch and not fields[field]['name'] in cmv): body += missing_check_code(tree, offsets, fields, objective_id, field, alternate, cmv, attr) depth += 1 alternate = ",\n%sIF (" % (depth * INDENT) post_missing_body += ")" for child in children: pre_condition = "" predicate = get_predicate(child) # code when missing splits has been used if has_missing_branch and predicate[VALUE_OFFSET] is not None: pre_condition = missing_prefix_code(child, fields, field, cmv) # complete split condition code body += split_condition_code( \ child, fields, field, alternate, pre_condition) depth += 1 alternate = ",\n%sIF (" % 
(depth * INDENT) body = plug_in_body(child, offsets, fields, objective_id, depth, cmv=cmv[:], ids_path=ids_path, subtree=subtree, body=body, attr=attr) body += ", NULL))" + post_missing_body post_missing_body = "" else: if attr is None: value = value_to_print( \ node[offsets["output"]], fields[objective_id]['optype']) else: try: value = node[offsets[attr]] except KeyError: value = "NULL" body += ", %s" % (value) return body
bigmler/export/out_tree/mysqltree.py
from bigml.tree_utils import INDENT from bigml.generators.model import PYTHON_OPERATOR, missing_branch, \ none_value from bigml.predict_utils.common import mintree_split, get_predicate, get_node from bigml.predict_utils.common import OPERATION_OFFSET, FIELD_OFFSET, \ VALUE_OFFSET, MISSING_OFFSET from bigml.generators.tree_common import filter_nodes T_MISSING_OPERATOR = { "=": "ISNULL(", "!=": "NOT ISNULL(" } # Map operator str to its corresponding mysql operator MYSQL_OPERATOR = { "/=": "!="} def value_to_print(value, optype): """String of code that represents a value according to its type """ if value is None: return "NULL" if optype == 'numeric': return value return "'%s'" % value.replace("'", '\\\'') def missing_check_code(tree, offsets, fields, objective_id, field, alternate, cmv, attr=None): """Builds the code to predict when the field is missing """ node = get_node(tree) condition = "ISNULL(`%s`)" % fields[field]['name'] code = ("%s (%s)" % (alternate, condition)) # used when printing the confidence metric if attr is not None: value = node[offsets[attr]] else: value = value_to_print( \ node[offsets["output"]], fields[objective_id]['optype']) code += (", %s" % value) cmv.append(fields[field]['name']) return code def missing_prefix_code(tree, fields, field, cmv): """Part of the condition that checks for missings when missing_splits has been used """ predicate = get_predicate(tree) missing = predicate[MISSING_OFFSET] negation = "" if missing else "NOT " connection = "OR" if missing else "AND" if not missing: cmv.append(fields[field]['name']) return "(%sISNULL(`%s`) %s " % ( \ negation, fields[field]['name'], connection) def split_condition_code(tree, fields, field, alternate, pre_condition): """Condition code for the split """ predicate = get_predicate(tree) post_condition = "" optype = fields[field]['optype'] value = value_to_print(predicate[VALUE_OFFSET], optype) operation = predicate[OPERATION_OFFSET] operator = ("" if predicate[VALUE_OFFSET] is None else 
MYSQL_OPERATOR.get(operation, PYTHON_OPERATOR.get(operation))) if predicate[VALUE_OFFSET] is None: value = "" pre_condition = ( T_MISSING_OPERATOR[operation]) post_condition = ")" condition = "%s`%s`%s%s%s" % ( \ pre_condition, fields[predicate[FIELD_OFFSET]]['name'], operator, value, post_condition) return "%s (%s)" % (alternate, condition) def plug_in_body(tree, offsets, fields, objective_id, depth=0, cmv=None, ids_path=None, subtree=True, body="", attr=None): """Translate the model into a mysql function `depth` controls the size of indentation. As soon as a value is missing that node is returned without further evaluation. `attr` is used to decide the value returned by the function. When it's set to None, the prediction is returned. When set to the name of an attribute (e.g. 'confidence') this attribute is returned """ if cmv is None: cmv = [] if body: alternate = ",\n%sIF (" % (depth * INDENT) else: alternate = "IF (" post_missing_body = "" node = get_node(tree) children = filter_nodes([] if node[offsets["children#"]] == 0 \ else node[offsets["children"]], offsets, ids=ids_path, subtree=subtree) if children: # field used in the split field = mintree_split(children) has_missing_branch = (missing_branch(children) or none_value(children)) # the missing is singled out as a special case only when there's # no missing branch in the children list if (not has_missing_branch and not fields[field]['name'] in cmv): body += missing_check_code(tree, offsets, fields, objective_id, field, alternate, cmv, attr) depth += 1 alternate = ",\n%sIF (" % (depth * INDENT) post_missing_body += ")" for child in children: pre_condition = "" predicate = get_predicate(child) # code when missing splits has been used if has_missing_branch and predicate[VALUE_OFFSET] is not None: pre_condition = missing_prefix_code(child, fields, field, cmv) # complete split condition code body += split_condition_code( \ child, fields, field, alternate, pre_condition) depth += 1 alternate = ",\n%sIF (" % 
(depth * INDENT) body = plug_in_body(child, offsets, fields, objective_id, depth, cmv=cmv[:], ids_path=ids_path, subtree=subtree, body=body, attr=attr) body += ", NULL))" + post_missing_body post_missing_body = "" else: if attr is None: value = value_to_print( \ node[offsets["output"]], fields[objective_id]['optype']) else: try: value = node[offsets[attr]] except KeyError: value = "NULL" body += ", %s" % (value) return body
0.588298
0.265577
import random import pygame,sys from pygame.locals import * import constant as const #Declaring a key press inventory key = { const.DIRT : K_1, const.GRASS : K_2, const.WATER : K_3, const.COAL : K_4, const.ROCK : K_5, const.LAVA : K_6 } #Declaring an inventory inventory = { const.DIRT : 0, const.GRASS : 0, const.WATER : 0, const.COAL : 0, const.ROCK : 0, const.LAVA : 0 } def inital(): #Initializes the game and display part pygame.init() global screen screen = pygame.display.set_mode((const.MAPWIDTH * const.TILESIZE, const.MAPHEIGHT * const.TILESIZE + const.INVENTORY_HEIGHT)) pygame.display.set_caption('Mineython') pygame.display.set_icon(pygame.image.load('PLAYER.png')) #Making the player global PLAYER global playerPos PLAYER = pygame.image.load('PLAYER.png').convert_alpha() playerPos = [0,0] #font for our inventory global INVFONT INVFONT = pygame.font.Font('FreeSansBold.ttf', 18) def animate(speed,texture,coord): screen.blit(const.textures[texture].convert_alpha(),coord) coord[0] += speed if coord[0] > const.MAPWIDTH * const.TILESIZE: coord[1] = random.randint(0, const.MAPHEIGHT * const.TILESIZE - const.TILESIZE) coord[0] = -200 cloudy = [-200,0] cloudy2 = [300,2] bird = [-100,2] def game(): while True: screen.fill(const.BLACK) #Fps counter set to 24 frames fpsClock = pygame.time.Clock() #Getting curent events and responding for event in pygame.event.get(): #print(event) #If the player wants to quit if event.type == QUIT: pygame.quit() sys.exit() elif event.type == KEYDOWN: #Player movement if event.key == K_RIGHT and playerPos[0] < const.MAPWIDTH - 1: playerPos[0] += 1 elif event.key == K_LEFT and playerPos[0] > 0: playerPos[0] -= 1 elif event.key == K_DOWN and playerPos[1] < const.MAPHEIGHT - 1: playerPos[1] += 1 elif event.key == K_UP and playerPos[1] > 0: playerPos[1] -= 1 #Player inventory logic elif event.key == K_SPACE: curentTile = const.tilemap[playerPos[1]][playerPos[0]] if curentTile != const.DIRT and curentTile != const.LAVA: inventory[curentTile] += 1 
const.tilemap[playerPos[1]][playerPos[0]] = const.DIRT for item in const.resources: if event.key == key[item]: curentTile = const.tilemap[playerPos[1]][playerPos[0]] if inventory[item] > 0: inventory[item] -= 1 const.tilemap[playerPos[1]][playerPos[0]] = item if curentTile !=const.LAVA: inventory[curentTile] += 1 #Drawing the map for row in range(const.MAPHEIGHT): for column in range(const.MAPWIDTH): screen.blit(const.textures[const.tilemap[row][column]], (column*const.TILESIZE, row*const.TILESIZE, const.TILESIZE, const.TILESIZE)) #Displaying the player screen.blit(PLAYER,(playerPos[0] * const.TILESIZE, playerPos[1] * const.TILESIZE)) #Displaying the inventory placePosition = const.INVENTORY_START_POSITION for item in const.resources: screen.blit(const.textures[item], (placePosition, const.MAPHEIGHT * const.TILESIZE + const.INVENTORY_SPACE_BETWEEN)) placePosition += const.INVENTORY_SPACE_WIDTH textObj = INVFONT.render(str(inventory[item]), True, const.WHITE, const.BLACK) screen.blit(textObj, (placePosition + const.INVENTORY_SPACE_BETWEEN, const.MAPHEIGHT * const.TILESIZE + const.INVENTORY_SPACE_WIDTH)) placePosition += const.INVENTORY_HEIGHT #Displaying the cloud animate(1, const.CLOUD, cloudy) animate(3, const.BIRD, bird) animate(2, const.CLOUD, cloudy2) #update the display pygame.display.update() fpsClock.tick(24) inital() game()
mineython.py
import random import pygame,sys from pygame.locals import * import constant as const #Declaring a key press inventory key = { const.DIRT : K_1, const.GRASS : K_2, const.WATER : K_3, const.COAL : K_4, const.ROCK : K_5, const.LAVA : K_6 } #Declaring an inventory inventory = { const.DIRT : 0, const.GRASS : 0, const.WATER : 0, const.COAL : 0, const.ROCK : 0, const.LAVA : 0 } def inital(): #Initializes the game and display part pygame.init() global screen screen = pygame.display.set_mode((const.MAPWIDTH * const.TILESIZE, const.MAPHEIGHT * const.TILESIZE + const.INVENTORY_HEIGHT)) pygame.display.set_caption('Mineython') pygame.display.set_icon(pygame.image.load('PLAYER.png')) #Making the player global PLAYER global playerPos PLAYER = pygame.image.load('PLAYER.png').convert_alpha() playerPos = [0,0] #font for our inventory global INVFONT INVFONT = pygame.font.Font('FreeSansBold.ttf', 18) def animate(speed,texture,coord): screen.blit(const.textures[texture].convert_alpha(),coord) coord[0] += speed if coord[0] > const.MAPWIDTH * const.TILESIZE: coord[1] = random.randint(0, const.MAPHEIGHT * const.TILESIZE - const.TILESIZE) coord[0] = -200 cloudy = [-200,0] cloudy2 = [300,2] bird = [-100,2] def game(): while True: screen.fill(const.BLACK) #Fps counter set to 24 frames fpsClock = pygame.time.Clock() #Getting curent events and responding for event in pygame.event.get(): #print(event) #If the player wants to quit if event.type == QUIT: pygame.quit() sys.exit() elif event.type == KEYDOWN: #Player movement if event.key == K_RIGHT and playerPos[0] < const.MAPWIDTH - 1: playerPos[0] += 1 elif event.key == K_LEFT and playerPos[0] > 0: playerPos[0] -= 1 elif event.key == K_DOWN and playerPos[1] < const.MAPHEIGHT - 1: playerPos[1] += 1 elif event.key == K_UP and playerPos[1] > 0: playerPos[1] -= 1 #Player inventory logic elif event.key == K_SPACE: curentTile = const.tilemap[playerPos[1]][playerPos[0]] if curentTile != const.DIRT and curentTile != const.LAVA: inventory[curentTile] += 1 
const.tilemap[playerPos[1]][playerPos[0]] = const.DIRT for item in const.resources: if event.key == key[item]: curentTile = const.tilemap[playerPos[1]][playerPos[0]] if inventory[item] > 0: inventory[item] -= 1 const.tilemap[playerPos[1]][playerPos[0]] = item if curentTile !=const.LAVA: inventory[curentTile] += 1 #Drawing the map for row in range(const.MAPHEIGHT): for column in range(const.MAPWIDTH): screen.blit(const.textures[const.tilemap[row][column]], (column*const.TILESIZE, row*const.TILESIZE, const.TILESIZE, const.TILESIZE)) #Displaying the player screen.blit(PLAYER,(playerPos[0] * const.TILESIZE, playerPos[1] * const.TILESIZE)) #Displaying the inventory placePosition = const.INVENTORY_START_POSITION for item in const.resources: screen.blit(const.textures[item], (placePosition, const.MAPHEIGHT * const.TILESIZE + const.INVENTORY_SPACE_BETWEEN)) placePosition += const.INVENTORY_SPACE_WIDTH textObj = INVFONT.render(str(inventory[item]), True, const.WHITE, const.BLACK) screen.blit(textObj, (placePosition + const.INVENTORY_SPACE_BETWEEN, const.MAPHEIGHT * const.TILESIZE + const.INVENTORY_SPACE_WIDTH)) placePosition += const.INVENTORY_HEIGHT #Displaying the cloud animate(1, const.CLOUD, cloudy) animate(3, const.BIRD, bird) animate(2, const.CLOUD, cloudy2) #update the display pygame.display.update() fpsClock.tick(24) inital() game()
0.156975
0.293848
from .Compatibility import Compatibility as c from ftplib import FTP import ftplib, time class FtpObject(): def __init__(self, log = True, ftp_config = None): self.log = log self.ftp_config = ftp_config # FTP connect self.ftp = FTP(self.ftp_config.hostname, self.ftp_config.username, self.ftp_config.password) self.root = "%s/%s/" % (self.ftp.pwd(), self.ftp_config.backup_name) self.directory = [] self.files = [] self.temp_directory = [] self.temp_files = [] self.create_dir_backup() self.tree(self.root) def create_dir_backup(self): if not self.ftp_config.backup_name in self.ftp.nlst(): self.ftp.mkd(self.ftp_config.backup_name) def tree(self, dir, temp=False): try: for item in self.ftp.nlst(dir): if "." in item: # Exclude files if temp: self.temp_files.append(item.lstrip(self.root)) else: self.files.append(item.lstrip(self.root)) else: if temp: self.temp_directory.append(item.lstrip(self.root)) self.tree(item, temp=True) else: self.directory.append(item.lstrip(self.root)) self.tree(item) except ftplib.error_perm as e: print(e) def dir_push(self, parent, dir): self.ftp.cwd("/%s/%s" % (self.ftp_config.backup_name, parent)) # Moove to parent dir self.ftp.mkd(dir) # Create directory if self.log: print("Sending directory '%s%s'" % (parent, dir)) def dir_del(self, parent, dir_del): try: self.ftp.cwd("/%s/%s" % (self.ftp_config.backup_name, parent)) # Moove to parent dir self.ftp.rmd(dir_del) # Delete directory if self.log: print("Deleting directory '%s%s'" % (parent, dir_del)) except ftplib.error_perm as e: print(e) def file_push(self, dir=None, file=None, timestamp=False): if not timestamp: with open("%s/%s/%s" % (self.ftp_config.backup_dir, dir, file), "rb") as file_to_push: dir = c.dir_windows_to_ftp(dir) self.ftp.cwd("/%s/%s" % (self.root, dir)) self.ftp.storbinary('STOR ' + file, file_to_push) if self.log: print("Sending file '%s%s'" % (dir, file)) else: with open(file, "rb") as file_to_push: self.ftp.cwd(self.root[:-1]) self.ftp.storbinary('STOR ' + file, 
file_to_push) def file_del(self, dir, file): self.ftp.cwd("%s%s" % (self.root, dir)) self.ftp.delete(file) if self.log: print("Deleting file '%s%s'" % (dir, file)) def get_time(self, dir, file): self.ftp.cwd("%s%s" % (self.root, dir)) datetime = self.ftp.voidcmd("MDTM " + file)[4:].strip() return time.mktime(time.strptime(datetime, '%Y%m%d%H%M%S'))
bebackup/FtpObject.py
from .Compatibility import Compatibility as c from ftplib import FTP import ftplib, time class FtpObject(): def __init__(self, log = True, ftp_config = None): self.log = log self.ftp_config = ftp_config # FTP connect self.ftp = FTP(self.ftp_config.hostname, self.ftp_config.username, self.ftp_config.password) self.root = "%s/%s/" % (self.ftp.pwd(), self.ftp_config.backup_name) self.directory = [] self.files = [] self.temp_directory = [] self.temp_files = [] self.create_dir_backup() self.tree(self.root) def create_dir_backup(self): if not self.ftp_config.backup_name in self.ftp.nlst(): self.ftp.mkd(self.ftp_config.backup_name) def tree(self, dir, temp=False): try: for item in self.ftp.nlst(dir): if "." in item: # Exclude files if temp: self.temp_files.append(item.lstrip(self.root)) else: self.files.append(item.lstrip(self.root)) else: if temp: self.temp_directory.append(item.lstrip(self.root)) self.tree(item, temp=True) else: self.directory.append(item.lstrip(self.root)) self.tree(item) except ftplib.error_perm as e: print(e) def dir_push(self, parent, dir): self.ftp.cwd("/%s/%s" % (self.ftp_config.backup_name, parent)) # Moove to parent dir self.ftp.mkd(dir) # Create directory if self.log: print("Sending directory '%s%s'" % (parent, dir)) def dir_del(self, parent, dir_del): try: self.ftp.cwd("/%s/%s" % (self.ftp_config.backup_name, parent)) # Moove to parent dir self.ftp.rmd(dir_del) # Delete directory if self.log: print("Deleting directory '%s%s'" % (parent, dir_del)) except ftplib.error_perm as e: print(e) def file_push(self, dir=None, file=None, timestamp=False): if not timestamp: with open("%s/%s/%s" % (self.ftp_config.backup_dir, dir, file), "rb") as file_to_push: dir = c.dir_windows_to_ftp(dir) self.ftp.cwd("/%s/%s" % (self.root, dir)) self.ftp.storbinary('STOR ' + file, file_to_push) if self.log: print("Sending file '%s%s'" % (dir, file)) else: with open(file, "rb") as file_to_push: self.ftp.cwd(self.root[:-1]) self.ftp.storbinary('STOR ' + file, 
file_to_push) def file_del(self, dir, file): self.ftp.cwd("%s%s" % (self.root, dir)) self.ftp.delete(file) if self.log: print("Deleting file '%s%s'" % (dir, file)) def get_time(self, dir, file): self.ftp.cwd("%s%s" % (self.root, dir)) datetime = self.ftp.voidcmd("MDTM " + file)[4:].strip() return time.mktime(time.strptime(datetime, '%Y%m%d%H%M%S'))
0.200636
0.071689
from openstack import proxy from otcextensions.sdk.auto_scaling.v1 import activity as _activity from otcextensions.sdk.auto_scaling.v1 import config as _config from otcextensions.sdk.auto_scaling.v1 import group as _group from otcextensions.sdk.auto_scaling.v1 import instance as _instance from otcextensions.sdk.auto_scaling.v1 import policy as _policy from otcextensions.sdk.auto_scaling.v1 import quota as _quota class Proxy(proxy.Proxy): skip_discovery = True # ======== Groups ======== def groups(self, **query): """Retrieve a generator of groups :param dict query: Optional query parameters to be sent to limit the resources being returned. * ``name``: group name * ``status``: group status, ``INSERVICE``, ``PAUSED``, ``ERROR`` * ``scaling_configuration_id``: scaling configuration id * ``marker``: pagination marker, known as ``start_number`` * ``limit``: pagination limit :returns: A generator of group (:class:`~otcextensions.sdk.auto_scaling.v1.group.Group`) instances """ return self._list(_group.Group, **query) def create_group(self, **attrs): """Create a new group from attributes :param dict attrs: Keyword arguments which will be used to create a :class:`~otcextensions.sdk.auto_scaling.v1.group.Group`, comprised of the properties on the Group class. :returns: The results of group creation :rtype: :class:`~otcextensions.sdk.auto_scaling.v1.group.Group` """ return self._create( _group.Group, prepend_key=False, **attrs ) def update_group(self, group, **attrs): """update group with attributes :param group: The value can be the ID of a group or a :class:`~otcextensions.sdk.auto_scaling.v1.group.Group` instance. :param dict attrs: Keyword arguments which will be used to create a :class:`~otcextensions.sdk.auto_scaling.v1.group.Group`, comprised of the properties on the Group class. 
:returns: The results of group creation :rtype: :class:`~otcextensions.sdk.auto_scaling.v1.group.Group` """ return self._update( _group.Group, group, prepend_key=False, **attrs) def get_group(self, group): """Get a group :param group: The value can be the ID of a group or a :class:`~otcextensions.sdk.auto_scaling.v1.group.Group` instance. :returns: Group instance :rtype: :class:`~otcextensions.sdk.auto_scaling.v1.group.Group` """ return self._get( _group.Group, group, ) def delete_group(self, group, ignore_missing=True): """Delete a group :param group: The value can be the ID of a group or a :class:`~otcextensions.sdk.auto_scaling.v1.group.Group` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.ResourceNotFound` will be raised when the group does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent group. """ return self._delete( _group.Group, group, ignore_missing=ignore_missing, ) def find_group(self, name_or_id, ignore_missing=True): """Find a single group :param name_or_id: The name or ID of a group :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.ResourceNotFound` will be raised when the group does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent group. :returns: ``None`` """ return self._find( _group.Group, name_or_id, ignore_missing=ignore_missing, ) def resume_group(self, group): """resume group :param group: The value can be the ID of a group or a :class:`~otcextensions.sdk.auto_scaling.v1.group.Group` instance. """ group = self._get_resource( _group.Group, group) return group.resume(self) def pause_group(self, group): """pause group :param group: The value can be the ID of a group or a :class:`~otcextensions.sdk.auto_scaling.v1.group.Group` instance. 
""" group = self._get_resource( _group.Group, group ) return group.pause(self) # ======== Configurations ======== def configs(self, **query): """Retrieve a generator of configs :param dict query: Optional query parameters to be sent to limit the resources being returned. * ``name``: configuration name * ``image_id``: image id * ``marker``: pagination marker * ``limit``: pagination limit :returns: A generator of config (:class:`~otcextensions.sdk.auto_scaling.v1.config.Config`) instances """ return self._list(_config.Config, **query) def create_config(self, name, **attrs): """Create a new config from config name and instance-config attributes :param name: auto scaling config name :param dict attrs: Keyword arguments which will be used to create a :class:`~otcextensions.sdk.auto_scaling.v1.config.InstanceConfig` , comprised of the properties on the InstanceConfig class. :returns: The results of config creation :rtype: :class:`~otcextensions.sdk.auto_scaling.v1.config.Config` """ return self._create( _config.Config, prepend_key=False, name=name, **attrs ) def get_config(self, config): """Get a config :param config: The value can be the ID of a config or a :class:`~otcextensions.sdk.auto_scaling.v1.config.Config` instance. :returns: Config instance :rtype: :class:`~otcextensions.sdk.auto_scaling.v1.config.Config` """ return self._get(_config.Config, config) # Name is not unique, so find might return multiple results def find_config(self, name_or_id, ignore_missing=True): """Get a config :param name_or_id: The name or ID of a config :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.ResourceNotFound` will be raised when the config does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent config. 
:returns: Config instance :rtype: :class:`~otcextensions.sdk.auto_scaling.v1.config.Config` """ return self._find( _config.Config, name_or_id, ignore_missing=ignore_missing, ) def delete_config(self, config, ignore_missing=True): """Delete a config :param config: The value can be the ID of a config or a :class:`~otcextensions.sdk.auto_scaling.v1.config.Config` instance. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.ResourceNotFound` will be raised when the config does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent config. :returns: Config been deleted :rtype: :class:`~otcextensions.sdk.auto_scaling.v1.config.Config` """ return self._delete( _config.Config, config, ignore_missing=ignore_missing, ) def batch_delete_configs(self, configs): """batch delete configs :param configs: The list item value can be the ID of a config or a :class:`~otcextensions.sdk.auto_scaling.v1.config.Config` instance. """ config = _config.Config() return config.batch_delete(self, configs) # ======== Policy ======== def policies(self, group, **query): """Retrieve a generator of policies :param group: The value can be the ID of a group or a :class:`~otcextensions.sdk.auto_scaling.v1.group.Group` instance. :param dict query: Optional query parameters to be sent to limit the resources being returned. 
* ``name``: policy name * ``type``: policy type * ``scaling_group_id``: scaling group id the policy applied to * ``marker``: pagination marker * ``limit``: pagination limit :returns: A generator of policy (:class:`~otcextensions.sdk.auto_scaling.v1.policy.Policy`) instances """ group = self._get_resource(_group.Group, group) return self._list( _policy.Policy, base_path='/scaling_policy/{id}/list'.format(id=group.id), **query) def create_policy(self, **attrs): """Create a new policy from attributes :param dict attrs: Keyword arguments which will be used to create a :class:`~otcextensions.sdk.auto_scaling.v1.policy.Policy`, comprised of the properties on the Policy class. :returns: The results of policy creation :rtype: :class:`~otcextensions.sdk.auto_scaling.v1.policy.Policy` """ return self._create(_policy.Policy, prepend_key=False, **attrs) def update_policy(self, policy, **attrs): """update policy with attributes :param policy: The value can be the ID of a policy or a :class:`~otcextensions.sdk.auto_scaling.v1.policy.Policy` instance. :param dict attrs: Keyword arguments which will be used to create a :class:`~otcextensions.sdk.auto_scaling.v1.policy.Policy`, comprised of the properties on the Policy class. :returns: The results of policy creation :rtype: :class:`~otcextensions.sdk.auto_scaling.v1.policy.Policy` """ return self._update(_policy.Policy, policy, prepend_key=False, **attrs) def get_policy(self, policy): """Get a policy :param policy: The value can be the ID of a policy or a :class:`~otcextensions.sdk.auto_scaling.v1.policy.Policy` instance. :returns: Policy instance :rtype: :class:`~otcextensions.sdk.auto_scaling.v1.policy.Policy` """ return self._get(_policy.Policy, policy) def delete_policy(self, policy, ignore_missing=True): """Delete a policy :param policy: The value can be the ID of a policy or a :class:`~otcextensions.sdk.auto_scaling.v1.policy.Policy` instance. 
:param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.ResourceNotFound` will be raised when the policy does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent policy. :returns: Policy been deleted :rtype: :class:`~otcextensions.sdk.auto_scaling.v1.policy.Policy` """ return self._delete(_policy.Policy, policy, ignore_missing=ignore_missing) def find_policy(self, name_or_id, group, ignore_missing=True): """Find a single policy :param name_or_id: The name or ID of a policy :param group: ID of a group :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.ResourceNotFound` will be raised when the policy does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent policy. :returns: ``None`` """ group = self._get_resource(_group.Group, group) return self._find(_policy.Policy, name_or_id, ignore_missing=ignore_missing, group_id=group.id) def execute_policy(self, policy): """execute policy :param policy: The value can be the ID of a policy or a :class:`~otcextensions.sdk.auto_scaling.v1.policy.Policy` instance. """ policy = self._get_resource(_policy.Policy, policy) policy.execute(self) def resume_policy(self, policy): """resume policy :param policy: The value can be the ID of a policy or a :class:`~otcextensions.sdk.auto_scaling.v1.policy.Policy` instance. """ policy = self._get_resource(_policy.Policy, policy) policy.resume(self) def pause_policy(self, policy): """pause policy :param policy: The value can be the ID of a policy or a :class:`~otcextensions.sdk.auto_scaling.v1.policy.Policy` instance. """ policy = self._get_resource(_policy.Policy, policy) policy.pause(self) # ======== Instances ======== def instances(self, group, **query): """Retrieve a generator of instances :param group: The value can be the ID of a group or a :class:`~otcextensions.sdk.auto_scaling.v1.group.Group` instance. 
:param dict query: Optional query parameters to be sent to limit the resources being returned. * ``health_status``: instance health status * ``lifecycle_status``: policy type * ``scaling_group_id``: scaling group id the policy applied to * ``marker``: pagination marker * ``limit``: pagination limit :returns: A generator of instances with type (:class:`~otcextensions.sdk.auto_scaling.v1.instance.Instance`) """ group = self._get_resource(_group.Group, group) return self._list( _instance.Instance, base_path='/scaling_group_instance/{id}/list'.format(id=group.id), **query) def remove_instance(self, instance, delete_instance=False, ignore_missing=True): """Remove an instance of auto scaling group :precondition: * the instance must in ``INSERVICE`` status * after remove the instance number of auto scaling group should not be less than min instance number * The own auto scaling group should not in scaling status :param instance: The value can be the ID of a instance or a :class:`~otcextensions.sdk.auto_scaling.v1.instance.Instance` instance. :param bool delete_instance: When set to ``True``, instance will be deleted after removed. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.ResourceNotFound` will be raised when the config does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent config. :returns: None """ instance = self._get_resource(_instance.Instance, instance) return instance.remove(self, delete_instance=delete_instance, ignore_missing=ignore_missing) def batch_instance_action( self, group, instances, action, delete_instance=False): """Batch add instances for auto scaling group :param group: The group which instances will be added to, The value can be the ID of a group or a :class:`~otcextensions.sdk.auto_scaling.v1.group.Group` instance. 
:param instances: The list item value can be ID of an instance or a :class:`~otcextensions.sdk.auto_scaling.v1.instance.Instance` instance :param action: Action type [``ADD``, ``REMOVE``, ``PROTECT``, ``UNPROTECT``] :param delete_instance: When set to ``True``, instance will be deleted after removed """ group = self._get_resource(_group.Group, group) instance = _instance.Instance(scaling_group_id=group.id) return instance.batch_action(self, instances, action, delete_instance) # ======== Activities ======== def activities(self, group, **query): """Retrieve a generator of Activity :param group: The value can be the ID of a group or a :class:`~otcextensions.sdk.auto_scaling.v1.group.Group` instance. :param dict query: Optional query parameters to be sent to limit the resources being returned. * ``start_time``: activity start time * ``end_time``: activity end time * ``marker``: pagination marker, known as ``start_number`` * ``limit``: pagination limit :returns: A generator of group (:class:`~otcextensions.sdk.auto_scaling.v1.activity.Activity`) instances """ group = self._get_resource(_group.Group, group) return self._list(_activity.Activity, scaling_group_id=group.id, **query) # ======== Quotas ======== def quotas(self, group=None): """Retrieve a generator of Quota :param group: If group is set, will query quota for the group instead of quota of user. The value can be the ID of a group or a :class:`~otcextensions.sdk.auto_scaling.v1.group.Group` instance. :returns: A generator of quota (:class:`~otcextensions.sdk.auto_scaling.v1.quota.Quota`) instances """ if group: group = self._get_resource(_group.Group, group) return self._list(_quota.ScalingQuota, paginated=False, scaling_group_id=group.id) else: return self._list(_quota.Quota, paginated=False)
otcextensions/sdk/auto_scaling/v1/_proxy.py
from openstack import proxy

from otcextensions.sdk.auto_scaling.v1 import activity as _activity
from otcextensions.sdk.auto_scaling.v1 import config as _config
from otcextensions.sdk.auto_scaling.v1 import group as _group
from otcextensions.sdk.auto_scaling.v1 import instance as _instance
from otcextensions.sdk.auto_scaling.v1 import policy as _policy
from otcextensions.sdk.auto_scaling.v1 import quota as _quota


class Proxy(proxy.Proxy):
    """Proxy for the Auto Scaling v1 service.

    Provides CRUD and action helpers for scaling groups, scaling
    configurations, scaling policies, group instances, scaling
    activities and quotas.
    """

    # Version discovery is skipped for this service endpoint.
    skip_discovery = True

    # ======== Groups ========
    def groups(self, **query):
        """Retrieve a generator of groups

        :param dict query: Optional query parameters to be sent to limit the
            resources being returned.

            * ``name``: group name
            * ``status``: group status, ``INSERVICE``, ``PAUSED``, ``ERROR``
            * ``scaling_configuration_id``: scaling configuration id
            * ``marker``: pagination marker, known as ``start_number``
            * ``limit``: pagination limit

        :returns: A generator of group
            (:class:`~otcextensions.sdk.auto_scaling.v1.group.Group`)
            instances
        """
        return self._list(_group.Group, **query)

    def create_group(self, **attrs):
        """Create a new group from attributes

        :param dict attrs: Keyword arguments which will be used to create a
            :class:`~otcextensions.sdk.auto_scaling.v1.group.Group`,
            comprised of the properties on the Group class.
        :returns: The results of group creation
        :rtype: :class:`~otcextensions.sdk.auto_scaling.v1.group.Group`
        """
        return self._create(
            _group.Group, prepend_key=False, **attrs
        )

    def update_group(self, group, **attrs):
        """update group with attributes

        :param group: The value can be the ID of a group or a
            :class:`~otcextensions.sdk.auto_scaling.v1.group.Group` instance.
        :param dict attrs: Keyword arguments which will be used to update a
            :class:`~otcextensions.sdk.auto_scaling.v1.group.Group`,
            comprised of the properties on the Group class.
        :returns: The results of the group update
        :rtype: :class:`~otcextensions.sdk.auto_scaling.v1.group.Group`
        """
        return self._update(
            _group.Group, group, prepend_key=False, **attrs)

    def get_group(self, group):
        """Get a group

        :param group: The value can be the ID of a group or a
            :class:`~otcextensions.sdk.auto_scaling.v1.group.Group` instance.
        :returns: Group instance
        :rtype: :class:`~otcextensions.sdk.auto_scaling.v1.group.Group`
        """
        return self._get(
            _group.Group, group,
        )

    def delete_group(self, group, ignore_missing=True):
        """Delete a group

        :param group: The value can be the ID of a group or a
            :class:`~otcextensions.sdk.auto_scaling.v1.group.Group` instance.
        :param bool ignore_missing: When set to ``False``
            :class:`~openstack.exceptions.ResourceNotFound` will be raised
            when the group does not exist.
            When set to ``True``, no exception will be set when attempting to
            delete a nonexistent group.
        """
        return self._delete(
            _group.Group, group, ignore_missing=ignore_missing,
        )

    def find_group(self, name_or_id, ignore_missing=True):
        """Find a single group

        :param name_or_id: The name or ID of a group
        :param bool ignore_missing: When set to ``False``
            :class:`~openstack.exceptions.ResourceNotFound` will be raised
            when the group does not exist.
            When set to ``True``, no exception will be set when attempting to
            find a nonexistent group.
        :returns: One :class:`~otcextensions.sdk.auto_scaling.v1.group.Group`
            or ``None`` when no match is found and ``ignore_missing`` is
            ``True``.
        """
        return self._find(
            _group.Group, name_or_id, ignore_missing=ignore_missing,
        )

    def resume_group(self, group):
        """resume group

        :param group: The value can be the ID of a group or a
            :class:`~otcextensions.sdk.auto_scaling.v1.group.Group` instance.
        """
        group = self._get_resource(
            _group.Group, group)
        return group.resume(self)

    def pause_group(self, group):
        """pause group

        :param group: The value can be the ID of a group or a
            :class:`~otcextensions.sdk.auto_scaling.v1.group.Group` instance.
        """
        group = self._get_resource(
            _group.Group, group
        )
        return group.pause(self)

    # ======== Configurations ========
    def configs(self, **query):
        """Retrieve a generator of configs

        :param dict query: Optional query parameters to be sent to limit the
            resources being returned.

            * ``name``: configuration name
            * ``image_id``: image id
            * ``marker``: pagination marker
            * ``limit``: pagination limit

        :returns: A generator of config
            (:class:`~otcextensions.sdk.auto_scaling.v1.config.Config`)
            instances
        """
        return self._list(_config.Config, **query)

    def create_config(self, name, **attrs):
        """Create a new config from config name and instance-config attributes

        :param name: auto scaling config name
        :param dict attrs: Keyword arguments which will be used to create a
            :class:`~otcextensions.sdk.auto_scaling.v1.config.InstanceConfig`,
            comprised of the properties on the InstanceConfig class.
        :returns: The results of config creation
        :rtype: :class:`~otcextensions.sdk.auto_scaling.v1.config.Config`
        """
        return self._create(
            _config.Config, prepend_key=False, name=name, **attrs
        )

    def get_config(self, config):
        """Get a config

        :param config: The value can be the ID of a config or a
            :class:`~otcextensions.sdk.auto_scaling.v1.config.Config`
            instance.
        :returns: Config instance
        :rtype: :class:`~otcextensions.sdk.auto_scaling.v1.config.Config`
        """
        return self._get(_config.Config, config)

    # Name is not unique, so find might return multiple results
    def find_config(self, name_or_id, ignore_missing=True):
        """Get a config

        :param name_or_id: The name or ID of a config
        :param bool ignore_missing: When set to ``False``
            :class:`~openstack.exceptions.ResourceNotFound` will be raised
            when the config does not exist.
            When set to ``True``, no exception will be set when attempting to
            find a nonexistent config.
        :returns: Config instance
        :rtype: :class:`~otcextensions.sdk.auto_scaling.v1.config.Config`
        """
        return self._find(
            _config.Config, name_or_id, ignore_missing=ignore_missing,
        )

    def delete_config(self, config, ignore_missing=True):
        """Delete a config

        :param config: The value can be the ID of a config or a
            :class:`~otcextensions.sdk.auto_scaling.v1.config.Config`
            instance.
        :param bool ignore_missing: When set to ``False``
            :class:`~openstack.exceptions.ResourceNotFound` will be raised
            when the config does not exist.
            When set to ``True``, no exception will be set when attempting to
            delete a nonexistent config.
        :returns: Config been deleted
        :rtype: :class:`~otcextensions.sdk.auto_scaling.v1.config.Config`
        """
        return self._delete(
            _config.Config, config, ignore_missing=ignore_missing,
        )

    def batch_delete_configs(self, configs):
        """batch delete configs

        :param configs: The list item value can be the ID of a config or a
            :class:`~otcextensions.sdk.auto_scaling.v1.config.Config`
            instance.
        """
        config = _config.Config()
        return config.batch_delete(self, configs)

    # ======== Policy ========
    def policies(self, group, **query):
        """Retrieve a generator of policies

        :param group: The value can be the ID of a group or a
            :class:`~otcextensions.sdk.auto_scaling.v1.group.Group` instance.
        :param dict query: Optional query parameters to be sent to limit the
            resources being returned.

            * ``name``: policy name
            * ``type``: policy type
            * ``scaling_group_id``: scaling group id the policy applied to
            * ``marker``: pagination marker
            * ``limit``: pagination limit

        :returns: A generator of policy
            (:class:`~otcextensions.sdk.auto_scaling.v1.policy.Policy`)
            instances
        """
        group = self._get_resource(_group.Group, group)
        return self._list(
            _policy.Policy,
            base_path='/scaling_policy/{id}/list'.format(id=group.id),
            **query)

    def create_policy(self, **attrs):
        """Create a new policy from attributes

        :param dict attrs: Keyword arguments which will be used to create a
            :class:`~otcextensions.sdk.auto_scaling.v1.policy.Policy`,
            comprised of the properties on the Policy class.
        :returns: The results of policy creation
        :rtype: :class:`~otcextensions.sdk.auto_scaling.v1.policy.Policy`
        """
        return self._create(_policy.Policy, prepend_key=False, **attrs)

    def update_policy(self, policy, **attrs):
        """update policy with attributes

        :param policy: The value can be the ID of a policy or a
            :class:`~otcextensions.sdk.auto_scaling.v1.policy.Policy`
            instance.
        :param dict attrs: Keyword arguments which will be used to update a
            :class:`~otcextensions.sdk.auto_scaling.v1.policy.Policy`,
            comprised of the properties on the Policy class.
        :returns: The results of the policy update
        :rtype: :class:`~otcextensions.sdk.auto_scaling.v1.policy.Policy`
        """
        return self._update(_policy.Policy, policy, prepend_key=False,
                            **attrs)

    def get_policy(self, policy):
        """Get a policy

        :param policy: The value can be the ID of a policy or a
            :class:`~otcextensions.sdk.auto_scaling.v1.policy.Policy`
            instance.
        :returns: Policy instance
        :rtype: :class:`~otcextensions.sdk.auto_scaling.v1.policy.Policy`
        """
        return self._get(_policy.Policy, policy)

    def delete_policy(self, policy, ignore_missing=True):
        """Delete a policy

        :param policy: The value can be the ID of a policy or a
            :class:`~otcextensions.sdk.auto_scaling.v1.policy.Policy`
            instance.
        :param bool ignore_missing: When set to ``False``
            :class:`~openstack.exceptions.ResourceNotFound` will be raised
            when the policy does not exist.
            When set to ``True``, no exception will be set when attempting to
            delete a nonexistent policy.
        :returns: Policy been deleted
        :rtype: :class:`~otcextensions.sdk.auto_scaling.v1.policy.Policy`
        """
        return self._delete(_policy.Policy, policy,
                            ignore_missing=ignore_missing)

    def find_policy(self, name_or_id, group, ignore_missing=True):
        """Find a single policy

        :param name_or_id: The name or ID of a policy
        :param group: ID of a group
        :param bool ignore_missing: When set to ``False``
            :class:`~openstack.exceptions.ResourceNotFound` will be raised
            when the policy does not exist.
            When set to ``True``, no exception will be set when attempting to
            find a nonexistent policy.
        :returns: One
            :class:`~otcextensions.sdk.auto_scaling.v1.policy.Policy` or
            ``None`` when no match is found and ``ignore_missing`` is
            ``True``.
        """
        group = self._get_resource(_group.Group, group)
        return self._find(_policy.Policy, name_or_id,
                          ignore_missing=ignore_missing,
                          group_id=group.id)

    def execute_policy(self, policy):
        """execute policy

        :param policy: The value can be the ID of a policy or a
            :class:`~otcextensions.sdk.auto_scaling.v1.policy.Policy`
            instance.
        """
        policy = self._get_resource(_policy.Policy, policy)
        policy.execute(self)

    def resume_policy(self, policy):
        """resume policy

        :param policy: The value can be the ID of a policy or a
            :class:`~otcextensions.sdk.auto_scaling.v1.policy.Policy`
            instance.
        """
        policy = self._get_resource(_policy.Policy, policy)
        policy.resume(self)

    def pause_policy(self, policy):
        """pause policy

        :param policy: The value can be the ID of a policy or a
            :class:`~otcextensions.sdk.auto_scaling.v1.policy.Policy`
            instance.
        """
        policy = self._get_resource(_policy.Policy, policy)
        policy.pause(self)

    # ======== Instances ========
    def instances(self, group, **query):
        """Retrieve a generator of instances

        :param group: The value can be the ID of a group or a
            :class:`~otcextensions.sdk.auto_scaling.v1.group.Group` instance.
        :param dict query: Optional query parameters to be sent to limit the
            resources being returned.

            * ``health_status``: instance health status
            * ``lifecycle_status``: policy type
            * ``scaling_group_id``: scaling group id the policy applied to
            * ``marker``: pagination marker
            * ``limit``: pagination limit

        :returns: A generator of instances with type
            (:class:`~otcextensions.sdk.auto_scaling.v1.instance.Instance`)
        """
        group = self._get_resource(_group.Group, group)
        return self._list(
            _instance.Instance,
            base_path='/scaling_group_instance/{id}/list'.format(id=group.id),
            **query)

    def remove_instance(self, instance, delete_instance=False,
                        ignore_missing=True):
        """Remove an instance of auto scaling group

        :precondition:
            * the instance must in ``INSERVICE`` status
            * after remove the instance number of auto scaling group should
              not be less than min instance number
            * The own auto scaling group should not in scaling status
        :param instance: The value can be the ID of a instance or a
            :class:`~otcextensions.sdk.auto_scaling.v1.instance.Instance`
            instance.
        :param bool delete_instance: When set to ``True``, instance will be
            deleted after removed.
        :param bool ignore_missing: When set to ``False``
            :class:`~openstack.exceptions.ResourceNotFound` will be raised
            when the instance does not exist.
            When set to ``True``, no exception will be set when attempting to
            remove a nonexistent instance.
        :returns: None
        """
        instance = self._get_resource(_instance.Instance, instance)
        return instance.remove(self, delete_instance=delete_instance,
                               ignore_missing=ignore_missing)

    def batch_instance_action(
            self, group, instances, action, delete_instance=False):
        """Batch perform an action on instances of an auto scaling group

        :param group: The group which instances belong to. The value can be
            the ID of a group or a
            :class:`~otcextensions.sdk.auto_scaling.v1.group.Group` instance.
        :param instances: The list item value can be ID of an instance or a
            :class:`~otcextensions.sdk.auto_scaling.v1.instance.Instance`
            instance
        :param action: Action type
            [``ADD``, ``REMOVE``, ``PROTECT``, ``UNPROTECT``]
        :param delete_instance: When set to ``True``, instance will be
            deleted after removed
        """
        group = self._get_resource(_group.Group, group)
        instance = _instance.Instance(scaling_group_id=group.id)
        return instance.batch_action(self, instances, action, delete_instance)

    # ======== Activities ========
    def activities(self, group, **query):
        """Retrieve a generator of Activity

        :param group: The value can be the ID of a group or a
            :class:`~otcextensions.sdk.auto_scaling.v1.group.Group` instance.
        :param dict query: Optional query parameters to be sent to limit the
            resources being returned.

            * ``start_time``: activity start time
            * ``end_time``: activity end time
            * ``marker``: pagination marker, known as ``start_number``
            * ``limit``: pagination limit

        :returns: A generator of group
            (:class:`~otcextensions.sdk.auto_scaling.v1.activity.Activity`)
            instances
        """
        group = self._get_resource(_group.Group, group)
        return self._list(_activity.Activity, scaling_group_id=group.id,
                          **query)

    # ======== Quotas ========
    def quotas(self, group=None):
        """Retrieve a generator of Quota

        :param group: If group is set, will query quota for the group instead
            of quota of user. The value can be the ID of a group or a
            :class:`~otcextensions.sdk.auto_scaling.v1.group.Group` instance.
        :returns: A generator of quota
            (:class:`~otcextensions.sdk.auto_scaling.v1.quota.Quota`)
            instances
        """
        if group:
            group = self._get_resource(_group.Group, group)
            return self._list(_quota.ScalingQuota,
                              paginated=False,
                              scaling_group_id=group.id)
        else:
            return self._list(_quota.Quota, paginated=False)
0.868158
0.30551
def mined_sentences(alignments, scores, src_file, tgt_file, src_file_lines, tgt_file_lines):
    """Split aligned sentence pairs into accepted/rejected output files.

    Alignments with a score > 0.0 are written to the "mined"/"accepted"
    files; the rest go to the "rejected" file.

    :param alignments: list of ``(source_indices, target_indices)`` pairs
    :param scores: per-alignment scores, same length as ``alignments``
    :param src_file: preprocessed source file; its name must end with a
        9-character suffix (e.g. ``_prep.txt``) which is stripped when
        building output file names
    :param tgt_file: preprocessed target file (same naming convention)
    :param src_file_lines: original (non-preprocessed) source file
    :param tgt_file_lines: original (non-preprocessed) target file
    """
    with open(src_file, encoding='utf-8-sig') as source:
        source_lines_prep = source.readlines()
    with open(tgt_file, encoding='utf-8-sig') as target:
        target_lines_prep = target.readlines()
    with open(src_file_lines, encoding='utf-8-sig') as source:
        source_lines = source.readlines()
    with open(tgt_file_lines, encoding='utf-8-sig') as target:
        target_lines = target.readlines()

    src_base = src_file[:-9]  # strip the "_prep.txt"-style suffix
    tgt_base = tgt_file[:-9]

    # FIX: the six output files used to be opened with bare open() and
    # closed only on the success path, leaking all handles (and possibly
    # buffered data) on any exception. Context managers guarantee closing.
    with open(src_base + "_mined.txt", "w", encoding='utf-8-sig') as mined_source_prep, \
            open(tgt_base + "_mined.txt", "w", encoding='utf-8-sig') as mined_target_prep, \
            open(src_base + "_mined_lines.txt", "w", encoding='utf-8-sig') as mined_source, \
            open(tgt_base + "_mined_lines.txt", "w", encoding='utf-8-sig') as mined_target, \
            open(src_base + "_accepted.txt", "w", encoding='utf-8-sig') as accepted, \
            open(src_base + "_rejected.txt", "w", encoding='utf-8-sig') as rejected:
        last = len(alignments) - 1
        for i in range(len(alignments)):
            src_idx, tgt_idx = alignments[i][0], alignments[i][1]
            if scores[i] > 0.0:
                line_source = []
                for j in src_idx:
                    mined_source_prep.write(source_lines_prep[j].strip("\n"))
                    # Collect the original lines first so they can be joined
                    # with a single-space separator afterwards.
                    line_source.append(source_lines[j].strip("\n"))
                mined_source.write(" ".join(line_source))
                accepted.write(" ".join(line_source))
                if i < last:  # no trailing newline after the final record
                    mined_source_prep.write("\n")
                    mined_source.write("\n")
                line_target = []
                for k in tgt_idx:
                    mined_target_prep.write(target_lines_prep[k].strip("\n"))
                    line_target.append(target_lines[k].strip("\n"))
                mined_target.write(" ".join(line_target))
                accepted.write("\t" + " ".join(line_target) + "\t" + str(scores[i]))
                if i < last:
                    mined_target_prep.write("\n")
                    mined_target.write("\n")
                    accepted.write("\n")
            else:
                line_source = [source_lines[j].strip("\n") for j in src_idx]
                rejected.write(" ".join(line_source))
                line_target = [target_lines[k].strip("\n") for k in tgt_idx]
                rejected.write("\t" + " ".join(line_target) + "\t" + str(scores[i]))
                if i < last:
                    rejected.write("\n")
extract_sentences.py
def mined_sentences(alignments, scores, src_file, tgt_file, src_file_lines, tgt_file_lines):
    """Split aligned sentence pairs into accepted/rejected output files.

    Alignments with a score > 0.0 are written to the "mined"/"accepted"
    files; the rest go to the "rejected" file.

    :param alignments: list of ``(source_indices, target_indices)`` pairs
    :param scores: per-alignment scores, same length as ``alignments``
    :param src_file: preprocessed source file; its name must end with a
        9-character suffix (e.g. ``_prep.txt``) which is stripped when
        building output file names
    :param tgt_file: preprocessed target file (same naming convention)
    :param src_file_lines: original (non-preprocessed) source file
    :param tgt_file_lines: original (non-preprocessed) target file
    """
    with open(src_file, encoding='utf-8-sig') as source:
        source_lines_prep = source.readlines()
    with open(tgt_file, encoding='utf-8-sig') as target:
        target_lines_prep = target.readlines()
    with open(src_file_lines, encoding='utf-8-sig') as source:
        source_lines = source.readlines()
    with open(tgt_file_lines, encoding='utf-8-sig') as target:
        target_lines = target.readlines()

    src_base = src_file[:-9]  # strip the "_prep.txt"-style suffix
    tgt_base = tgt_file[:-9]

    # FIX: the six output files used to be opened with bare open() and
    # closed only on the success path, leaking all handles (and possibly
    # buffered data) on any exception. Context managers guarantee closing.
    with open(src_base + "_mined.txt", "w", encoding='utf-8-sig') as mined_source_prep, \
            open(tgt_base + "_mined.txt", "w", encoding='utf-8-sig') as mined_target_prep, \
            open(src_base + "_mined_lines.txt", "w", encoding='utf-8-sig') as mined_source, \
            open(tgt_base + "_mined_lines.txt", "w", encoding='utf-8-sig') as mined_target, \
            open(src_base + "_accepted.txt", "w", encoding='utf-8-sig') as accepted, \
            open(src_base + "_rejected.txt", "w", encoding='utf-8-sig') as rejected:
        last = len(alignments) - 1
        for i in range(len(alignments)):
            src_idx, tgt_idx = alignments[i][0], alignments[i][1]
            if scores[i] > 0.0:
                line_source = []
                for j in src_idx:
                    mined_source_prep.write(source_lines_prep[j].strip("\n"))
                    # Collect the original lines first so they can be joined
                    # with a single-space separator afterwards.
                    line_source.append(source_lines[j].strip("\n"))
                mined_source.write(" ".join(line_source))
                accepted.write(" ".join(line_source))
                if i < last:  # no trailing newline after the final record
                    mined_source_prep.write("\n")
                    mined_source.write("\n")
                line_target = []
                for k in tgt_idx:
                    mined_target_prep.write(target_lines_prep[k].strip("\n"))
                    line_target.append(target_lines[k].strip("\n"))
                mined_target.write(" ".join(line_target))
                accepted.write("\t" + " ".join(line_target) + "\t" + str(scores[i]))
                if i < last:
                    mined_target_prep.write("\n")
                    mined_target.write("\n")
                    accepted.write("\n")
            else:
                line_source = [source_lines[j].strip("\n") for j in src_idx]
                rejected.write(" ".join(line_source))
                line_target = [target_lines[k].strip("\n") for k in tgt_idx]
                rejected.write("\t" + " ".join(line_target) + "\t" + str(scores[i]))
                if i < last:
                    rejected.write("\n")
0.071607
0.279232
from json import JSONDecodeError
from queue import Queue
from random import choice
from re import fullmatch
from string import ascii_lowercase
from threading import Thread
from time import sleep, time

from thingsboard_gateway.tb_utility.tb_loader import TBModuleLoader
from thingsboard_gateway.tb_utility.tb_utility import TBUtility

try:
    from requests import Timeout, request
except ImportError:
    print("Requests library not found - installing...")
    TBUtility.install_package("requests")
    from requests import Timeout, request

import requests
from requests.auth import HTTPBasicAuth
from requests.exceptions import RequestException

from thingsboard_gateway.connectors.connector import Connector, log
from thingsboard_gateway.connectors.request.json_request_uplink_converter import JsonRequestUplinkConverter
from thingsboard_gateway.connectors.request.json_request_downlink_converter import JsonRequestDownlinkConverter

# pylint: disable=E1101
# NOTE(review): ADH (anonymous Diffie-Hellman) ciphers provide no server
# authentication; enabling them weakens TLS for every request made through
# this process. Confirm this is really required by the target devices.
requests.packages.urllib3.util.ssl_.DEFAULT_CIPHERS += ':ADH-AES128-SHA256'


class RequestConnector(Connector, Thread):
    """Gateway connector that periodically polls HTTP endpoints and
    forwards converted telemetry/attributes to the ThingsBoard gateway.

    Also serves attribute updates and server-side RPC calls by issuing
    outgoing HTTP requests built by downlink converters.
    """

    def __init__(self, gateway, config, connector_type):
        """Build the connector from its JSON configuration.

        :param gateway: gateway service used to store converted data
        :param config: connector configuration dictionary
        :param connector_type: connector type name used to load extensions
        """
        super().__init__()
        self.statistics = {'MessagesReceived': 0, 'MessagesSent': 0}
        self.__rpc_requests = []
        self.__config = config
        self.__connector_type = connector_type
        self.__gateway = gateway
        # Basic auth only when explicitly configured; anonymous otherwise.
        self.__security = HTTPBasicAuth(self.__config["security"]["username"],
                                        self.__config["security"]["password"]) if \
            self.__config["security"]["type"] == "basic" else None
        self.__host = None
        self.__service_headers = {}
        if "http://" in self.__config["host"].lower() or "https://" in self.__config["host"].lower():
            self.__host = self.__config["host"]
        else:
            # Default to plain HTTP when no scheme is given.
            self.__host = "http://" + self.__config["host"]
        self.__ssl_verify = self.__config.get("SSLVerify", False)
        # Random 5-letter name when none is configured.
        self.setName(self.__config.get("name", "".join(choice(ascii_lowercase) for _ in range(5))))
        self.daemon = True
        self.__connected = False
        self.__stopped = False
        self.__requests_in_progress = []
        self.__convert_queue = Queue(1000000)
        self.__attribute_updates = []
        self.__fill_attribute_updates()
        self.__fill_rpc_requests()
        self.__fill_requests()

    def run(self):
        """Main loop: fire due polling requests and drain converted data."""
        while not self.__stopped:
            if self.__requests_in_progress:
                for request in self.__requests_in_progress:
                    if time() >= request["next_time"]:
                        # Each poll runs in its own short-lived thread so a
                        # slow endpoint does not block the scheduler loop.
                        thread = Thread(target=self.__send_request,
                                        args=(request, self.__convert_queue, log), daemon=True,
                                        name="Request to endpoint '%s' Thread" % (request["config"].get("url")))
                        thread.start()
            else:
                sleep(.1)
            self.__process_data()

    def on_attributes_update(self, content):
        """Send an outgoing HTTP request for a matching attribute update.

        :param content: gateway message with "device" and "data" keys
        """
        try:
            for attribute_request in self.__attribute_updates:
                if fullmatch(attribute_request["deviceNameFilter"], content["device"]) and fullmatch(
                        attribute_request["attributeFilter"], list(content["data"].keys())[0]):
                    converted_data = attribute_request["converter"].convert(attribute_request, content)
                    response_queue = Queue(1)
                    request_dict = {"config": {**attribute_request, **converted_data},
                                    "request": request}
                    attribute_update_request_thread = Thread(target=self.__send_request,
                                                             args=(request_dict, response_queue, log),
                                                             daemon=True,
                                                             name="Attribute request to %s" % (converted_data["url"]))
                    attribute_update_request_thread.start()
                    attribute_update_request_thread.join()
                    # NOTE(review): __send_request routes results through
                    # __convert_data into self.__convert_queue, so this
                    # response_queue appears to stay empty — verify.
                    if not response_queue.empty():
                        response = response_queue.get_nowait()
                        log.debug(response)
                    del response_queue
        except Exception as e:
            log.exception(e)

    def server_side_rpc_handler(self, content):
        """Serve a server-side RPC call by issuing an HTTP request and
        replying to the platform with the result.

        :param content: gateway RPC message with "device" and "data" keys
        """
        try:
            for rpc_request in self.__rpc_requests:
                if fullmatch(rpc_request["deviceNameFilter"], content["device"]) and fullmatch(
                        rpc_request["methodFilter"], content["data"]["method"]):
                    converted_data = rpc_request["converter"].convert(rpc_request, content)
                    response_queue = Queue(1)
                    request_dict = {"config": {**rpc_request, **converted_data},
                                    "request": request}
                    rpc_request_thread = Thread(target=self.__send_request,
                                                args=(request_dict, response_queue, log),
                                                daemon=True,
                                                name="RPC request to %s" % (converted_data["url"]))
                    rpc_request_thread.start()
                    rpc_request_thread.join()
                    if not response_queue.empty():
                        response = response_queue.get_nowait()
                        log.debug(response)
                        self.__gateway.send_rpc_reply(device=content["device"],
                                                      req_id=content["data"]["id"],
                                                      content=response[2])
                    self.__gateway.send_rpc_reply(device=content["device"],
                                                  req_id=content["data"]["id"],
                                                  success_sent=True)
                    del response_queue
        except Exception as e:
            log.exception(e)

    def __fill_requests(self):
        """Build the polling request list from the "mapping" config section."""
        log.debug(self.__config["mapping"])
        for endpoint in self.__config["mapping"]:
            try:
                log.debug(endpoint)
                converter = None
                if endpoint["converter"]["type"] == "custom":
                    module = TBModuleLoader.import_module(self.__connector_type, endpoint["converter"]["extension"])
                    if module is not None:
                        log.debug('Custom converter for url %s - found!', endpoint["url"])
                        converter = module(endpoint)
                    else:
                        log.error("\n\nCannot find extension module for %s url.\nPlease check your configuration.\n",
                                  endpoint["url"])
                else:
                    converter = JsonRequestUplinkConverter(endpoint)
                self.__requests_in_progress.append({"config": endpoint,
                                                    "converter": converter,
                                                    "next_time": time(),
                                                    "request": request})
            except Exception as e:
                log.exception(e)

    def __fill_attribute_updates(self):
        """Build downlink converters for the "attributeUpdates" config section."""
        for attribute_request in self.__config.get("attributeUpdates", []):
            if attribute_request.get("converter") is not None:
                converter = TBModuleLoader.import_module("request", attribute_request["converter"])(attribute_request)
            else:
                converter = JsonRequestDownlinkConverter(attribute_request)
            attribute_request_dict = {**attribute_request, "converter": converter}
            self.__attribute_updates.append(attribute_request_dict)

    def __fill_rpc_requests(self):
        """Build downlink converters for the "serverSideRpc" config section."""
        for rpc_request in self.__config.get("serverSideRpc", []):
            if rpc_request.get("converter") is not None:
                converter = TBModuleLoader.import_module("request", rpc_request["converter"])(rpc_request)
            else:
                converter = JsonRequestDownlinkConverter(rpc_request)
            rpc_request_dict = {**rpc_request, "converter": converter}
            self.__rpc_requests.append(rpc_request_dict)

    def __send_request(self, request, converter_queue, logger):
        """Execute one HTTP request described by *request* and convert the
        response.

        :param request: dict with "config" (endpoint settings), "request"
            (the requests.request callable) and, for polling entries,
            "converter" and "next_time"
        :param converter_queue: queue used only for its full() back-pressure
            check before conversion
        :param logger: logger to report progress and errors to
        """
        url = ""
        try:
            # Schedule the next poll before sending, so a slow request does
            # not delay the schedule.
            request["next_time"] = time() + request["config"].get("scanPeriod", 10)
            request_url_from_config = request["config"]["url"]
            request_url_from_config = str('/' + request_url_from_config) if request_url_from_config[
                                                                                0] != '/' else request_url_from_config
            logger.debug(request_url_from_config)
            url = self.__host + request_url_from_config
            logger.debug(url)
            request_timeout = request["config"].get("timeout", 1)
            params = {
                "method": request["config"].get("httpMethod", "GET"),
                "url": url,
                "timeout": request_timeout,
                "allow_redirects": request["config"].get("allowRedirects", False),
                "verify": self.__ssl_verify,
                "auth": self.__security,
                "data": request["config"].get("data", {})
            }
            logger.debug(url)
            if request["config"].get("httpHeaders") is not None:
                params["headers"] = request["config"]["httpHeaders"]
            logger.debug("Request to %s will be sent", url)
            response = request["request"](**params)
            if response and response.ok:
                if not converter_queue.full():
                    data_to_storage = [url, request["converter"]]
                    try:
                        data_to_storage.append(response.json())
                    except (UnicodeDecodeError, JSONDecodeError):
                        # BUG FIX: Response.content is a property, not a
                        # method — the original called response.content(),
                        # raising TypeError and dropping every non-JSON
                        # payload.
                        data_to_storage.append(response.content)
                    if len(data_to_storage) == 3:
                        self.__convert_data(data_to_storage)
                        self.statistics["MessagesReceived"] = self.statistics["MessagesReceived"] + 1
            else:
                logger.error("Request to URL: %s finished with code: %i", url, response.status_code)
        except Timeout:
            logger.error("Timeout error on request %s.", url)
        except RequestException as e:
            logger.error("Cannot connect to %s. Connection error.", url)
            logger.debug(e)
        except ConnectionError:
            logger.error("Cannot connect to %s. Connection error.", url)
        except Exception as e:
            logger.exception(e)

    def __convert_data(self, data):
        """Run the uplink converter over a response payload and queue the
        result for delivery to the gateway.

        :param data: [url, converter, payload] triple from __send_request
        """
        try:
            url, converter, data = data
            data_to_send = {}
            if isinstance(data, list):
                # Merge per-item conversion results by device name.
                for data_item in data:
                    self.__add_ts(data_item)
                    converted_data = converter.convert(url, data_item)
                    if data_to_send.get(converted_data["deviceName"]) is None:
                        data_to_send[converted_data["deviceName"]] = converted_data
                    else:
                        if converted_data["telemetry"]:
                            data_to_send[converted_data["deviceName"]]["telemetry"].append(
                                converted_data["telemetry"][0])
                        if converted_data["attributes"]:
                            data_to_send[converted_data["deviceName"]]["attributes"].append(
                                converted_data["attributes"][0])
            else:
                self.__add_ts(data)
                data_to_send = converter.convert(url, data)
            self.__convert_queue.put(data_to_send)
        except Exception as e:
            log.exception(e)

    def __add_ts(self, data):
        """Stamp the payload with a millisecond timestamp if absent."""
        if data.get("ts") is None:
            data["ts"] = time() * 1000

    def __process_data(self):
        """Forward one queued converted payload to the gateway storage."""
        try:
            if not self.__convert_queue.empty():
                data = self.__convert_queue.get()
                self.__gateway.send_to_storage(self.get_name(), data)
                self.statistics["MessagesSent"] = self.statistics["MessagesSent"] + 1
        except Exception as e:
            log.exception(e)

    def get_name(self):
        """Return the connector thread name."""
        return self.name

    def is_connected(self):
        """Return the last known connection state."""
        return self.__connected

    def open(self):
        """Start the connector thread."""
        self.__stopped = False
        self.start()

    def close(self):
        """Signal the main loop to stop."""
        self.__stopped = True
thingsboard_gateway/connectors/request/request_connector.py
from json import JSONDecodeError from queue import Queue from random import choice from re import fullmatch from string import ascii_lowercase from threading import Thread from time import sleep, time from thingsboard_gateway.tb_utility.tb_loader import TBModuleLoader from thingsboard_gateway.tb_utility.tb_utility import TBUtility try: from requests import Timeout, request except ImportError: print("Requests library not found - installing...") TBUtility.install_package("requests") from requests import Timeout, request import requests from requests.auth import HTTPBasicAuth from requests.exceptions import RequestException from thingsboard_gateway.connectors.connector import Connector, log from thingsboard_gateway.connectors.request.json_request_uplink_converter import JsonRequestUplinkConverter from thingsboard_gateway.connectors.request.json_request_downlink_converter import JsonRequestDownlinkConverter # pylint: disable=E1101 requests.packages.urllib3.util.ssl_.DEFAULT_CIPHERS += ':ADH-AES128-SHA256' class RequestConnector(Connector, Thread): def __init__(self, gateway, config, connector_type): super().__init__() self.statistics = {'MessagesReceived': 0, 'MessagesSent': 0} self.__rpc_requests = [] self.__config = config self.__connector_type = connector_type self.__gateway = gateway self.__security = HTTPBasicAuth(self.__config["security"]["username"], self.__config["security"]["password"]) if \ self.__config["security"]["type"] == "basic" else None self.__host = None self.__service_headers = {} if "http://" in self.__config["host"].lower() or "https://" in self.__config["host"].lower(): self.__host = self.__config["host"] else: self.__host = "http://" + self.__config["host"] self.__ssl_verify = self.__config.get("SSLVerify", False) self.setName(self.__config.get("name", "".join(choice(ascii_lowercase) for _ in range(5)))) self.daemon = True self.__connected = False self.__stopped = False self.__requests_in_progress = [] self.__convert_queue = Queue(1000000) 
self.__attribute_updates = [] self.__fill_attribute_updates() self.__fill_rpc_requests() self.__fill_requests() def run(self): while not self.__stopped: if self.__requests_in_progress: for request in self.__requests_in_progress: if time() >= request["next_time"]: thread = Thread(target=self.__send_request, args=(request, self.__convert_queue, log), daemon=True, name="Request to endpoint \'%s\' Thread" % (request["config"].get("url"))) thread.start() else: sleep(.1) self.__process_data() def on_attributes_update(self, content): try: for attribute_request in self.__attribute_updates: if fullmatch(attribute_request["deviceNameFilter"], content["device"]) and fullmatch( attribute_request["attributeFilter"], list(content["data"].keys())[0]): converted_data = attribute_request["converter"].convert(attribute_request, content) response_queue = Queue(1) request_dict = {"config": {**attribute_request, **converted_data}, "request": request} attribute_update_request_thread = Thread(target=self.__send_request, args=(request_dict, response_queue, log), daemon=True, name="Attribute request to %s" % (converted_data["url"])) attribute_update_request_thread.start() attribute_update_request_thread.join() if not response_queue.empty(): response = response_queue.get_nowait() log.debug(response) del response_queue except Exception as e: log.exception(e) def server_side_rpc_handler(self, content): try: for rpc_request in self.__rpc_requests: if fullmatch(rpc_request["deviceNameFilter"], content["device"]) and fullmatch( rpc_request["methodFilter"], content["data"]["method"]): converted_data = rpc_request["converter"].convert(rpc_request, content) response_queue = Queue(1) request_dict = {"config": {**rpc_request, **converted_data}, "request": request} request_dict["config"].get("uplink_converter") rpc_request_thread = Thread(target=self.__send_request, args=(request_dict, response_queue, log), daemon=True, name="RPC request to %s" % (converted_data["url"])) rpc_request_thread.start() 
rpc_request_thread.join() if not response_queue.empty(): response = response_queue.get_nowait() log.debug(response) self.__gateway.send_rpc_reply(device=content["device"], req_id=content["data"]["id"], content=response[2]) self.__gateway.send_rpc_reply(device=content["device"], req_id=content["data"]["id"], success_sent=True) del response_queue except Exception as e: log.exception(e) def __fill_requests(self): log.debug(self.__config["mapping"]) for endpoint in self.__config["mapping"]: try: log.debug(endpoint) converter = None if endpoint["converter"]["type"] == "custom": module = TBModuleLoader.import_module(self.__connector_type, endpoint["converter"]["extension"]) if module is not None: log.debug('Custom converter for url %s - found!', endpoint["url"]) converter = module(endpoint) else: log.error("\n\nCannot find extension module for %s url.\nPlease check your configuration.\n", endpoint["url"]) else: converter = JsonRequestUplinkConverter(endpoint) self.__requests_in_progress.append({"config": endpoint, "converter": converter, "next_time": time(), "request": request}) except Exception as e: log.exception(e) def __fill_attribute_updates(self): for attribute_request in self.__config.get("attributeUpdates", []): if attribute_request.get("converter") is not None: converter = TBModuleLoader.import_module("request", attribute_request["converter"])(attribute_request) else: converter = JsonRequestDownlinkConverter(attribute_request) attribute_request_dict = {**attribute_request, "converter": converter} self.__attribute_updates.append(attribute_request_dict) def __fill_rpc_requests(self): for rpc_request in self.__config.get("serverSideRpc", []): if rpc_request.get("converter") is not None: converter = TBModuleLoader.import_module("request", rpc_request["converter"])(rpc_request) else: converter = JsonRequestDownlinkConverter(rpc_request) rpc_request_dict = {**rpc_request, "converter": converter} self.__rpc_requests.append(rpc_request_dict) def __send_request(self, 
request, converter_queue, logger): url = "" try: request["next_time"] = time() + request["config"].get("scanPeriod", 10) request_url_from_config = request["config"]["url"] request_url_from_config = str('/' + request_url_from_config) if request_url_from_config[ 0] != '/' else request_url_from_config logger.debug(request_url_from_config) url = self.__host + request_url_from_config logger.debug(url) request_timeout = request["config"].get("timeout", 1) params = { "method": request["config"].get("httpMethod", "GET"), "url": url, "timeout": request_timeout, "allow_redirects": request["config"].get("allowRedirects", False), "verify": self.__ssl_verify, "auth": self.__security, "data": request["config"].get("data", {}) } logger.debug(url) if request["config"].get("httpHeaders") is not None: params["headers"] = request["config"]["httpHeaders"] logger.debug("Request to %s will be sent", url) response = request["request"](**params) if response and response.ok: if not converter_queue.full(): data_to_storage = [url, request["converter"]] try: data_to_storage.append(response.json()) except UnicodeDecodeError: data_to_storage.append(response.content()) except JSONDecodeError: data_to_storage.append(response.content()) if len(data_to_storage) == 3: self.__convert_data(data_to_storage) self.statistics["MessagesReceived"] = self.statistics["MessagesReceived"] + 1 else: logger.error("Request to URL: %s finished with code: %i", url, response.status_code) except Timeout: logger.error("Timeout error on request %s.", url) except RequestException as e: logger.error("Cannot connect to %s. Connection error.", url) logger.debug(e) except ConnectionError: logger.error("Cannot connect to %s. 
Connection error.", url) except Exception as e: logger.exception(e) def __convert_data(self, data): try: url, converter, data = data data_to_send = {} if isinstance(data, list): for data_item in data: self.__add_ts(data_item) converted_data = converter.convert(url, data_item) if data_to_send.get(converted_data["deviceName"]) is None: data_to_send[converted_data["deviceName"]] = converted_data else: if converted_data["telemetry"]: data_to_send[converted_data["deviceName"]]["telemetry"].append( converted_data["telemetry"][0]) if converted_data["attributes"]: data_to_send[converted_data["deviceName"]]["attributes"].append( converted_data["attributes"][0]) else: self.__add_ts(data) data_to_send = converter.convert(url, data) self.__convert_queue.put(data_to_send) except Exception as e: log.exception(e) def __add_ts(self, data): if data.get("ts") is None: data["ts"] = time() * 1000 def __process_data(self): try: if not self.__convert_queue.empty(): data = self.__convert_queue.get() self.__gateway.send_to_storage(self.get_name(), data) self.statistics["MessagesSent"] = self.statistics["MessagesSent"] + 1 except Exception as e: log.exception(e) def get_name(self): return self.name def is_connected(self): return self.__connected def open(self): self.__stopped = False self.start() def close(self): self.__stopped = True
0.288068
0.049543
# standard library import json from typing import Optional, Union # first-party from tcex.api.tc.v3.api_endpoints import ApiEndpoints from tcex.api.tc.v3.object_abc import ObjectABC from tcex.api.tc.v3.object_collection_abc import ObjectCollectionABC from tcex.api.tc.v3.tags.tag_filter import TagFilter from tcex.api.tc.v3.tags.tag_model import TagModel, TagsModel class Tags(ObjectCollectionABC): """Tags Collection. # Example of params input { 'result_limit': 100, # Limit the retrieved results. 'result_start': 10, # Starting count used for pagination. 'fields': ['caseId', 'summary'] # Select additional return fields. } Args: session (Session): Session object configured with TC API Auth. tql_filters (list): List of TQL filters. params (dict): Additional query params (see example above). """ def __init__(self, **kwargs) -> None: """Initialize class properties.""" super().__init__( kwargs.pop('session', None), kwargs.pop('tql_filter', None), kwargs.pop('params', None) ) self._model = TagsModel(**kwargs) self.type_ = 'tags' def __iter__(self) -> 'Tag': """Iterate over CM objects.""" return self.iterate(base_class=Tag) @property def _api_endpoint(self) -> str: """Return the type specific API endpoint.""" return ApiEndpoints.TAGS.value @property def filter(self) -> 'TagFilter': """Return the type specific filter object.""" return TagFilter(self.tql) class Tag(ObjectABC): """Tags Object. Args: description (str, kwargs): A brief description of the Tag. name (str, kwargs): The **name** for the Tag. owner (str, kwargs): The name of the Owner of the Tag. 
""" def __init__(self, **kwargs) -> None: """Initialize class properties.""" super().__init__(kwargs.pop('session', None)) # properties self._model = TagModel(**kwargs) self._nested_field_name = 'tags' self._nested_filter = 'has_tag' self.type_ = 'Tag' @property def _api_endpoint(self) -> str: """Return the type specific API endpoint.""" return ApiEndpoints.TAGS.value @property def model(self) -> 'TagModel': """Return the model data.""" return self._model @model.setter def model(self, data: Union['TagModel', dict]) -> None: """Create model using the provided data.""" if isinstance(data, type(self.model)): # provided data is already a model, nothing required to change self._model = data elif isinstance(data, dict): # provided data is raw response, load the model self._model = type(self.model)(**data) else: raise RuntimeError(f'Invalid data type: {type(data)} provided.') def remove(self, params: Optional[dict] = None) -> None: """Remove a nested object.""" method = 'PUT' unique_id = self._calculate_unique_id() # validate an id is available self._validate_id(unique_id.get('value'), '') body = json.dumps( { self._nested_field_name: { 'data': [{unique_id.get('filter'): unique_id.get('value')}], 'mode': 'delete', } } ) # get the unique id value for id, xid, summary, etc ... parent_api_endpoint = self._parent_data.get('api_endpoint') parent_unique_id = self._parent_data.get('unique_id') url = f'{parent_api_endpoint}/{parent_unique_id}' # validate parent an id is available self._validate_id(parent_unique_id, url) self._request( method=method, url=url, body=body, headers={'content-type': 'application/json'}, params=params, ) return self.request
tcex/api/tc/v3/tags/tag.py
# standard library import json from typing import Optional, Union # first-party from tcex.api.tc.v3.api_endpoints import ApiEndpoints from tcex.api.tc.v3.object_abc import ObjectABC from tcex.api.tc.v3.object_collection_abc import ObjectCollectionABC from tcex.api.tc.v3.tags.tag_filter import TagFilter from tcex.api.tc.v3.tags.tag_model import TagModel, TagsModel class Tags(ObjectCollectionABC): """Tags Collection. # Example of params input { 'result_limit': 100, # Limit the retrieved results. 'result_start': 10, # Starting count used for pagination. 'fields': ['caseId', 'summary'] # Select additional return fields. } Args: session (Session): Session object configured with TC API Auth. tql_filters (list): List of TQL filters. params (dict): Additional query params (see example above). """ def __init__(self, **kwargs) -> None: """Initialize class properties.""" super().__init__( kwargs.pop('session', None), kwargs.pop('tql_filter', None), kwargs.pop('params', None) ) self._model = TagsModel(**kwargs) self.type_ = 'tags' def __iter__(self) -> 'Tag': """Iterate over CM objects.""" return self.iterate(base_class=Tag) @property def _api_endpoint(self) -> str: """Return the type specific API endpoint.""" return ApiEndpoints.TAGS.value @property def filter(self) -> 'TagFilter': """Return the type specific filter object.""" return TagFilter(self.tql) class Tag(ObjectABC): """Tags Object. Args: description (str, kwargs): A brief description of the Tag. name (str, kwargs): The **name** for the Tag. owner (str, kwargs): The name of the Owner of the Tag. 
""" def __init__(self, **kwargs) -> None: """Initialize class properties.""" super().__init__(kwargs.pop('session', None)) # properties self._model = TagModel(**kwargs) self._nested_field_name = 'tags' self._nested_filter = 'has_tag' self.type_ = 'Tag' @property def _api_endpoint(self) -> str: """Return the type specific API endpoint.""" return ApiEndpoints.TAGS.value @property def model(self) -> 'TagModel': """Return the model data.""" return self._model @model.setter def model(self, data: Union['TagModel', dict]) -> None: """Create model using the provided data.""" if isinstance(data, type(self.model)): # provided data is already a model, nothing required to change self._model = data elif isinstance(data, dict): # provided data is raw response, load the model self._model = type(self.model)(**data) else: raise RuntimeError(f'Invalid data type: {type(data)} provided.') def remove(self, params: Optional[dict] = None) -> None: """Remove a nested object.""" method = 'PUT' unique_id = self._calculate_unique_id() # validate an id is available self._validate_id(unique_id.get('value'), '') body = json.dumps( { self._nested_field_name: { 'data': [{unique_id.get('filter'): unique_id.get('value')}], 'mode': 'delete', } } ) # get the unique id value for id, xid, summary, etc ... parent_api_endpoint = self._parent_data.get('api_endpoint') parent_unique_id = self._parent_data.get('unique_id') url = f'{parent_api_endpoint}/{parent_unique_id}' # validate parent an id is available self._validate_id(parent_unique_id, url) self._request( method=method, url=url, body=body, headers={'content-type': 'application/json'}, params=params, ) return self.request
0.91602
0.225833
import sys, numpy from numpy import sin, cos, log10, log2, sqrt, pi from scipy.special import jv as besselj sys.path.insert(0,'../Stage_0/') from conversions import * #==================================================================== # aux thruster information #==================================================================== class prop_sizing: def __init__(self, data): self.num = 0 #data['npropeller'] self.thrust = 0.e0 self.power = 0.e0 #==================================================================== # aircraft properties (distributed to rotor, wing, etc later) #==================================================================== class aircraft: def __init__(self, adict): #==================================================================== # big-picture items #==================================================================== self.aircraftid = int(adict['aircraftID']) #==================================================================== # Mission #==================================================================== self.masscrew = adict['mass_crew'] self.common_equip_wt = adict['mass_common_equip'] # self.fuselagewidth = adict['fuselage_width'] # self.clearance = adict['clearance'] #==================================================================== # rotor #==================================================================== self.nrotor = int(adict['nrotor']) self.roffset = adict['rotor_offset'] self.rotorshafttilt = adict['rotor_shaft_tilt'] #==================================================================== # propeller #==================================================================== self.npropeller = adict['npropeller'] #==================================================================== # Engine #==================================================================== self.nengine = int(adict['nengine']) #==================================================================== # physical constants 
#==================================================================== class constants: def __init__(self): self.grav = 9.80665e0 # gravitation constant (m/s2) self.rho0 = 1.2256e0 # standard air density (kg/m3) self.f2m = 0.3048e0 # conversion from feet to meter self.m2f = 3.28084e0 # conversion from meter to feet self.hp2kw = 0.7457e0 # conversion from hp to KW self.kw2hp = 1.3410e0 # conversion from KW to hp self.kts2mps = 0.5144e0 # conversion from knots to m/s self.mps2kts = 1.94401e0 # conversion from m/s to knots self.lb2kg = 0.45359e0 # conversion from lb to kg self.kg2lb = 2.20462e0 # conversion from kg to lb self.mps2kph = 3.6e0 # conversion from m/s to km/hr self.hr2sec = 3600e0 # conversion from hour to sec self.sec2hr = 2.7777e-4 # conversion from sec to hour self.rpm2rps = 0.404719e0 # conversion from RPM to rad/sec self.in2m = 0.0254e0 # conversion from inch to m self.m2in = 3.93700e+1 # conversion from m to inch self.pi = 3.14159265e0 # conversion from m to inch self.min2sec = 60.0e0 # conversion from mins to seconds self.sec2min = 1.6666666e-2 # conversion from secs to mins self.nm2m = 1852.e0 # conversion from nautical miles to meters self.km2nm = 0.539957e0 # conversion from kms to nautical miles
src/Python/Stage_1/component_classes.py
import sys, numpy from numpy import sin, cos, log10, log2, sqrt, pi from scipy.special import jv as besselj sys.path.insert(0,'../Stage_0/') from conversions import * #==================================================================== # aux thruster information #==================================================================== class prop_sizing: def __init__(self, data): self.num = 0 #data['npropeller'] self.thrust = 0.e0 self.power = 0.e0 #==================================================================== # aircraft properties (distributed to rotor, wing, etc later) #==================================================================== class aircraft: def __init__(self, adict): #==================================================================== # big-picture items #==================================================================== self.aircraftid = int(adict['aircraftID']) #==================================================================== # Mission #==================================================================== self.masscrew = adict['mass_crew'] self.common_equip_wt = adict['mass_common_equip'] # self.fuselagewidth = adict['fuselage_width'] # self.clearance = adict['clearance'] #==================================================================== # rotor #==================================================================== self.nrotor = int(adict['nrotor']) self.roffset = adict['rotor_offset'] self.rotorshafttilt = adict['rotor_shaft_tilt'] #==================================================================== # propeller #==================================================================== self.npropeller = adict['npropeller'] #==================================================================== # Engine #==================================================================== self.nengine = int(adict['nengine']) #==================================================================== # physical constants 
#==================================================================== class constants: def __init__(self): self.grav = 9.80665e0 # gravitation constant (m/s2) self.rho0 = 1.2256e0 # standard air density (kg/m3) self.f2m = 0.3048e0 # conversion from feet to meter self.m2f = 3.28084e0 # conversion from meter to feet self.hp2kw = 0.7457e0 # conversion from hp to KW self.kw2hp = 1.3410e0 # conversion from KW to hp self.kts2mps = 0.5144e0 # conversion from knots to m/s self.mps2kts = 1.94401e0 # conversion from m/s to knots self.lb2kg = 0.45359e0 # conversion from lb to kg self.kg2lb = 2.20462e0 # conversion from kg to lb self.mps2kph = 3.6e0 # conversion from m/s to km/hr self.hr2sec = 3600e0 # conversion from hour to sec self.sec2hr = 2.7777e-4 # conversion from sec to hour self.rpm2rps = 0.404719e0 # conversion from RPM to rad/sec self.in2m = 0.0254e0 # conversion from inch to m self.m2in = 3.93700e+1 # conversion from m to inch self.pi = 3.14159265e0 # conversion from m to inch self.min2sec = 60.0e0 # conversion from mins to seconds self.sec2min = 1.6666666e-2 # conversion from secs to mins self.nm2m = 1852.e0 # conversion from nautical miles to meters self.km2nm = 0.539957e0 # conversion from kms to nautical miles
0.231354
0.11474
from sklearn.base import TransformerMixin, BaseEstimator from docplex.mp.constants import ObjectiveSense from docplex.mp.advmodel import AdvModel from docplex.mp.utils import * import numpy as np from pandas import DataFrame class CplexTransformerBase(BaseEstimator, TransformerMixin): """ Root class for CPLEX transformers """ def __init__(self, sense="min", keep_zeros=False): self.sense = ObjectiveSense.parse(sense) # fail if error self.keep_zeros = keep_zeros def fit(self, *_): return self def transform(self, X, y=None, **transform_params): """ Main method to solve Linear Programming problemss. :param X: the matrix describing the constraints of the problem. Accepts numpy matrices, pandas dataframes, or sciPy sparse matrices :param y: an optional sequence of scalars descrining the cost vector :param transform_params: optional keyword arguments to pass additional parameters. :return: a pandas dataframe with two columns: name and value containing the values of the columns. """ # look for upper, lower bound columns in keyword args var_lbs = transform_params.get("lbs", None) var_ubs = transform_params.get("ubs", None) if is_pandas_dataframe(X): return self._transform_from_pandas(X, y, var_lbs, var_ubs, **transform_params) elif is_numpy_matrix(X): return self._transform_from_numpy(X, y, var_lbs, var_ubs, **transform_params) elif is_scipy_sparse(X): return self._transform_from_scsparse(X, y, var_lbs, var_ubs, **transform_params) elif isinstance(X, list): return self._transform_from_sequence(X, y, var_lbs, var_ubs, **transform_params) else: raise ValueError( 'transformer expects pandas dataframe, numpy matrix or python list, {0} was passed'.format(X)) def _transform_from_pandas(self, X, y, var_lbs, var_ubs, **transform_params): raise NotImplemented def _transform_from_numpy(self, X, y, var_lbs, var_ubs, **transform_params): raise NotImplemented def _transform_from_scsparse(self, X, y, var_lbs, var_ubs, **transform_params): raise NotImplemented def 
_transform_from_sequence(self, X, y, var_lbs, var_ubs, **transform_params): # by default, convert X to a numpy matrix return self._transform_from_numpy(np.matrix(X), y, var_lbs, var_ubs, **transform_params) def _solve_model(self, mdl, cols, colnames, costs, **params): if costs is not None: mdl.set_objective(sense=self.sense, expr=mdl.scal_prod_vars_all_different(cols, costs)) # --- lp export lp_export = params.pop('lp_export', False) lp_base = params.pop('lp_basename', None) lp_path = params.pop('lp_path', None) if lp_export: mdl.export_as_lp(basename=lp_base, path=lp_path) # --- s = mdl.solve() if s: dd = {'value': s.get_values(cols)} if colnames is not None: dd['name'] = colnames ret = DataFrame(dd) if not self.keep_zeros: ret = ret[ret['value'] != 0] ret = ret.reset_index(drop=True) return ret else: return self.new_empty_dataframe() @classmethod def new_empty_dataframe(cls): return DataFrame([]) class LPTransformer(CplexTransformerBase): """ A Scikit-learn transformer class to solve linear problems. This transformer class solves LP problems of type Ax <= B """ def __init__(self, sense="min"): """ Creates an instance of LPTransformer to solve linear problems. :param sense: defines the objective sense. Accepts 'min" or "max" (not case-sensitive), or an instance of docplex.mp.ObjectiveSense Note: The matrix X is supposed to have shape (M,N+1) where M is the number of rows and N the number of variables. The last column contains the right hand sides of the problem (the B in Ax <= B) The optional vector Y contains the N cost coefficients for each column variables. Example: Passing X = [[1,2,3], [4,5,6]], Y= [11,12,13] means solving the linear problem: minimize 11x + 12y + 13z s.t. 
1x + 2y <= 3 4x + 5y <= 6 """ super(LPTransformer, self).__init__(sense) def _transform_from_pandas(self, X, y, var_lbs, var_ubs, **transform_params): assert is_pandas_dataframe(X) X_new = X.copy() # save min, max per nutrients in lists, drop them rhs = X["rhs"].tolist() X_new.drop(labels=["rhs"], inplace=True, axis=1) with AdvModel(name='lp_transformer') as mdl: x_rows, x_cols = X.shape nb_vars = x_cols - 1 varlist = mdl.continuous_var_list(nb_vars, lb=var_lbs, ub=var_ubs) senses = transform_params.get('sense', 'le') mdl.add(mdl.matrix_constraints(X_new, varlist, rhs, sense=senses)) return self._solve_model(mdl, varlist, colnames=X_new.columns, costs=y, **transform_params) def _transform_from_numpy(self, X, y, var_lbs, var_ubs, **transform_params): # matrix is nrows x (ncols + 2) # last two columns are lbs, ubs in that order assert is_numpy_matrix(X) colnames = transform_params.get("colnames", None) mshape = X.shape xr, xc = mshape assert xc >= 2 nb_vars = xc - 1 X_cts = X[:, :-1] rhs = X[:, -1].A1 with AdvModel(name='lp_transformer') as mdl: varlist = mdl.continuous_var_list(nb_vars, lb=var_lbs, ub=var_ubs, name=colnames) senses = transform_params.get('sense', 'le') mdl.add(mdl.matrix_constraints(X_cts, varlist, rhs, sense=senses)) return self._solve_model(mdl, varlist, colnames, costs=y, **transform_params) def _transform_from_scsparse(self, X, y, var_lbs, var_ubs, **transform_params): assert is_scipy_sparse(X) colnames = transform_params.get("colnames", None) mshape = X.shape nr, nc = mshape assert nc == nr + 1 nb_vars = nc - 1 with AdvModel(name='lp_transformer') as mdl: varlist = mdl.continuous_var_list(nb_vars, lb=var_lbs, ub=var_ubs) lfactory = mdl._lfactory r_rows = range(nr) exprs = [lfactory.linear_expr() for _ in r_rows] rhss = [0] * nr # convert to coo before iterate() x_coo = X.tocoo() for coef, row, col in izip(x_coo.data, x_coo.row, x_coo.col): if col < nr: exprs[row]._add_term(varlist[col], coef) else: rhss[row] = coef senses = 
transform_params.get('sense', 'le') cts = [lfactory.new_binary_constraint(exprs[r], rhs=rhss[r], sense=senses) for r in r_rows] lfactory._post_constraint_block(cts) return self._solve_model(mdl, varlist, colnames, costs=y, **transform_params) class LPRangeTransformer(CplexTransformerBase): def __init__(self, sense="min"): """ Creates an instance of LPRangeTransformer to solve range-based linear problems. :param sense: defines the objective sense. Accepts 'min" or "max" (not case-sensitive), or an instance of docplex.mp.ObjectiveSense Note: The matrix X is supposed to have shape (M,N+2) where M is the number of rows and N the number of variables. The last two columns are assumed to contain the minimum (resp.maximum) values for the row ranges, that m and M in: m <= Ax <= M The optional vector Y contains the N cost coefficients for each column variables. Example: Passing X = [[1,2,3,30], [4,5,6,60]], Y= [11,12,13] means solving the linear problem: minimize 11x + 12y + 13z s.t. 3 <= 1x + 2y <= 30 6 <= 4x + 5y <= 60 """ super(LPRangeTransformer, self).__init__(sense) def _transform_from_pandas(self, X, y, var_lbs, var_ubs, **transform_params): assert is_pandas_dataframe(X) x_rows, x_cols = X.shape X_new = X.copy() # extract columns with name 'min' and 'max' as series then drop row_mins = X["min"].tolist() row_maxs = X["max"].tolist() X_new.drop(labels=["min", "max"], inplace=True, axis=1) with AdvModel(name='lp_range_trasnformer') as mdl: nb_vars = x_cols - 2 varlist = mdl.continuous_var_list(nb_vars, lb=var_lbs, ub=var_ubs) mdl.add(mdl.matrix_ranges(X_new, varlist, row_mins, row_maxs)) return self._solve_model(mdl, varlist, colnames=X_new.columns, costs=y, **transform_params) def _transform_from_numpy(self, X, y, var_lbs, var_ubs, **transform_params): # matrix is nrows x (ncols + 2) # last two columns are lbs, ubs in that order assert is_numpy_matrix(X) colnames = transform_params.pop("colnames", None) mshape = X.shape xr, xc = mshape assert xc >= 3 nb_vars = xc - 2 
X_cts = X[:, :-2] row_mins = X[:, -2] row_maxs = X[:, -1] with AdvModel(name='lp_range_transformer') as mdl: varlist = mdl.continuous_var_list(nb_vars, lb=var_lbs, ub=var_ubs, name=colnames) mdl.add(mdl.matrix_ranges(X_cts, varlist, row_mins, row_maxs)) return self._solve_model(mdl, varlist, colnames, costs=y, **transform_params)
ukpsummarizer-be/cplex/python/docplex/docplex/mp/sktrans/transformers.py
from sklearn.base import TransformerMixin, BaseEstimator from docplex.mp.constants import ObjectiveSense from docplex.mp.advmodel import AdvModel from docplex.mp.utils import * import numpy as np from pandas import DataFrame class CplexTransformerBase(BaseEstimator, TransformerMixin): """ Root class for CPLEX transformers """ def __init__(self, sense="min", keep_zeros=False): self.sense = ObjectiveSense.parse(sense) # fail if error self.keep_zeros = keep_zeros def fit(self, *_): return self def transform(self, X, y=None, **transform_params): """ Main method to solve Linear Programming problemss. :param X: the matrix describing the constraints of the problem. Accepts numpy matrices, pandas dataframes, or sciPy sparse matrices :param y: an optional sequence of scalars descrining the cost vector :param transform_params: optional keyword arguments to pass additional parameters. :return: a pandas dataframe with two columns: name and value containing the values of the columns. """ # look for upper, lower bound columns in keyword args var_lbs = transform_params.get("lbs", None) var_ubs = transform_params.get("ubs", None) if is_pandas_dataframe(X): return self._transform_from_pandas(X, y, var_lbs, var_ubs, **transform_params) elif is_numpy_matrix(X): return self._transform_from_numpy(X, y, var_lbs, var_ubs, **transform_params) elif is_scipy_sparse(X): return self._transform_from_scsparse(X, y, var_lbs, var_ubs, **transform_params) elif isinstance(X, list): return self._transform_from_sequence(X, y, var_lbs, var_ubs, **transform_params) else: raise ValueError( 'transformer expects pandas dataframe, numpy matrix or python list, {0} was passed'.format(X)) def _transform_from_pandas(self, X, y, var_lbs, var_ubs, **transform_params): raise NotImplemented def _transform_from_numpy(self, X, y, var_lbs, var_ubs, **transform_params): raise NotImplemented def _transform_from_scsparse(self, X, y, var_lbs, var_ubs, **transform_params): raise NotImplemented def 
_transform_from_sequence(self, X, y, var_lbs, var_ubs, **transform_params): # by default, convert X to a numpy matrix return self._transform_from_numpy(np.matrix(X), y, var_lbs, var_ubs, **transform_params) def _solve_model(self, mdl, cols, colnames, costs, **params): if costs is not None: mdl.set_objective(sense=self.sense, expr=mdl.scal_prod_vars_all_different(cols, costs)) # --- lp export lp_export = params.pop('lp_export', False) lp_base = params.pop('lp_basename', None) lp_path = params.pop('lp_path', None) if lp_export: mdl.export_as_lp(basename=lp_base, path=lp_path) # --- s = mdl.solve() if s: dd = {'value': s.get_values(cols)} if colnames is not None: dd['name'] = colnames ret = DataFrame(dd) if not self.keep_zeros: ret = ret[ret['value'] != 0] ret = ret.reset_index(drop=True) return ret else: return self.new_empty_dataframe() @classmethod def new_empty_dataframe(cls): return DataFrame([]) class LPTransformer(CplexTransformerBase): """ A Scikit-learn transformer class to solve linear problems. This transformer class solves LP problems of type Ax <= B """ def __init__(self, sense="min"): """ Creates an instance of LPTransformer to solve linear problems. :param sense: defines the objective sense. Accepts 'min" or "max" (not case-sensitive), or an instance of docplex.mp.ObjectiveSense Note: The matrix X is supposed to have shape (M,N+1) where M is the number of rows and N the number of variables. The last column contains the right hand sides of the problem (the B in Ax <= B) The optional vector Y contains the N cost coefficients for each column variables. Example: Passing X = [[1,2,3], [4,5,6]], Y= [11,12,13] means solving the linear problem: minimize 11x + 12y + 13z s.t. 
1x + 2y <= 3 4x + 5y <= 6 """ super(LPTransformer, self).__init__(sense) def _transform_from_pandas(self, X, y, var_lbs, var_ubs, **transform_params): assert is_pandas_dataframe(X) X_new = X.copy() # save min, max per nutrients in lists, drop them rhs = X["rhs"].tolist() X_new.drop(labels=["rhs"], inplace=True, axis=1) with AdvModel(name='lp_transformer') as mdl: x_rows, x_cols = X.shape nb_vars = x_cols - 1 varlist = mdl.continuous_var_list(nb_vars, lb=var_lbs, ub=var_ubs) senses = transform_params.get('sense', 'le') mdl.add(mdl.matrix_constraints(X_new, varlist, rhs, sense=senses)) return self._solve_model(mdl, varlist, colnames=X_new.columns, costs=y, **transform_params) def _transform_from_numpy(self, X, y, var_lbs, var_ubs, **transform_params): # matrix is nrows x (ncols + 2) # last two columns are lbs, ubs in that order assert is_numpy_matrix(X) colnames = transform_params.get("colnames", None) mshape = X.shape xr, xc = mshape assert xc >= 2 nb_vars = xc - 1 X_cts = X[:, :-1] rhs = X[:, -1].A1 with AdvModel(name='lp_transformer') as mdl: varlist = mdl.continuous_var_list(nb_vars, lb=var_lbs, ub=var_ubs, name=colnames) senses = transform_params.get('sense', 'le') mdl.add(mdl.matrix_constraints(X_cts, varlist, rhs, sense=senses)) return self._solve_model(mdl, varlist, colnames, costs=y, **transform_params) def _transform_from_scsparse(self, X, y, var_lbs, var_ubs, **transform_params): assert is_scipy_sparse(X) colnames = transform_params.get("colnames", None) mshape = X.shape nr, nc = mshape assert nc == nr + 1 nb_vars = nc - 1 with AdvModel(name='lp_transformer') as mdl: varlist = mdl.continuous_var_list(nb_vars, lb=var_lbs, ub=var_ubs) lfactory = mdl._lfactory r_rows = range(nr) exprs = [lfactory.linear_expr() for _ in r_rows] rhss = [0] * nr # convert to coo before iterate() x_coo = X.tocoo() for coef, row, col in izip(x_coo.data, x_coo.row, x_coo.col): if col < nr: exprs[row]._add_term(varlist[col], coef) else: rhss[row] = coef senses = 
transform_params.get('sense', 'le') cts = [lfactory.new_binary_constraint(exprs[r], rhs=rhss[r], sense=senses) for r in r_rows] lfactory._post_constraint_block(cts) return self._solve_model(mdl, varlist, colnames, costs=y, **transform_params) class LPRangeTransformer(CplexTransformerBase): def __init__(self, sense="min"): """ Creates an instance of LPRangeTransformer to solve range-based linear problems. :param sense: defines the objective sense. Accepts 'min" or "max" (not case-sensitive), or an instance of docplex.mp.ObjectiveSense Note: The matrix X is supposed to have shape (M,N+2) where M is the number of rows and N the number of variables. The last two columns are assumed to contain the minimum (resp.maximum) values for the row ranges, that m and M in: m <= Ax <= M The optional vector Y contains the N cost coefficients for each column variables. Example: Passing X = [[1,2,3,30], [4,5,6,60]], Y= [11,12,13] means solving the linear problem: minimize 11x + 12y + 13z s.t. 3 <= 1x + 2y <= 30 6 <= 4x + 5y <= 60 """ super(LPRangeTransformer, self).__init__(sense) def _transform_from_pandas(self, X, y, var_lbs, var_ubs, **transform_params): assert is_pandas_dataframe(X) x_rows, x_cols = X.shape X_new = X.copy() # extract columns with name 'min' and 'max' as series then drop row_mins = X["min"].tolist() row_maxs = X["max"].tolist() X_new.drop(labels=["min", "max"], inplace=True, axis=1) with AdvModel(name='lp_range_trasnformer') as mdl: nb_vars = x_cols - 2 varlist = mdl.continuous_var_list(nb_vars, lb=var_lbs, ub=var_ubs) mdl.add(mdl.matrix_ranges(X_new, varlist, row_mins, row_maxs)) return self._solve_model(mdl, varlist, colnames=X_new.columns, costs=y, **transform_params) def _transform_from_numpy(self, X, y, var_lbs, var_ubs, **transform_params): # matrix is nrows x (ncols + 2) # last two columns are lbs, ubs in that order assert is_numpy_matrix(X) colnames = transform_params.pop("colnames", None) mshape = X.shape xr, xc = mshape assert xc >= 3 nb_vars = xc - 2 
X_cts = X[:, :-2] row_mins = X[:, -2] row_maxs = X[:, -1] with AdvModel(name='lp_range_transformer') as mdl: varlist = mdl.continuous_var_list(nb_vars, lb=var_lbs, ub=var_ubs, name=colnames) mdl.add(mdl.matrix_ranges(X_cts, varlist, row_mins, row_maxs)) return self._solve_model(mdl, varlist, colnames, costs=y, **transform_params)
0.865452
0.330188
import logging from ops.framework import ( StoredState, EventBase, ObjectEvents, EventSource, Object) class HasPeersEvent(EventBase): pass class ReadyPeersEvent(EventBase): pass class CephBenchmarkingPeerEvents(ObjectEvents): has_peers = EventSource(HasPeersEvent) ready_peers = EventSource(ReadyPeersEvent) class CephBenchmarkingPeers(Object): on = CephBenchmarkingPeerEvents() state = StoredState() SWIFT_KEY = "swift_key" SWIFT_USER_CREATED = "swift_user_created" def __init__(self, charm, relation_name): super().__init__(charm, relation_name) self.relation_name = relation_name self.this_unit = self.framework.model.unit self.framework.observe( charm.on[relation_name].relation_changed, self.on_changed) def on_changed(self, event): logging.info("CephBenchmarkingPeers on_changed") self.on.has_peers.emit() if self.ready_peer_details: self.on.ready_peers.emit() def set_swift_key(self, password): logging.info("Setting swift key") self.peers_rel.data[self.peers_rel.app][self.SWIFT_KEY] = password def set_swift_user_created(self, user): logging.info("Setting swift user created") self.peers_rel.data[self.peers_rel.app][self.SWIFT_USER_CREATED] = user @property def ready_peer_details(self): peers = { self.framework.model.unit.name: { 'ip': self.peers_bind_address}} for u in self.peers_rel.units: peers[u.name] = { 'ip': self.peers_rel.data[u]['ingress-address']} return peers @property def is_joined(self): return self.peers_rel is not None @property def peers_rel(self): return self.framework.model.get_relation(self.relation_name) @property def peers_binding(self): return self.framework.model.get_binding(self.peers_rel) @property def peers_bind_address(self): return str(self.peers_binding.network.bind_address) @property def swift_key(self): if not self.peers_rel: return None return self.peers_rel.data[self.peers_rel.app].get(self.SWIFT_KEY) @property def swift_user_created(self): if not self.peers_rel: return None return self.peers_rel.data[ 
self.peers_rel.app].get(self.SWIFT_USER_CREATED) @property def peer_addresses(self): addresses = [self.peers_bind_address] for u in self.peers_rel.units: addresses.append(self.peers_rel.data[u]['ingress-address']) return sorted(addresses) @property def peers_count(self): if self.peers_rel: return len(self.peers_rel.units) else: return 0 @property def unit_count(self): return self.peers_count + 1
src/interface_ceph_benchmarking_peers.py
import logging from ops.framework import ( StoredState, EventBase, ObjectEvents, EventSource, Object) class HasPeersEvent(EventBase): pass class ReadyPeersEvent(EventBase): pass class CephBenchmarkingPeerEvents(ObjectEvents): has_peers = EventSource(HasPeersEvent) ready_peers = EventSource(ReadyPeersEvent) class CephBenchmarkingPeers(Object): on = CephBenchmarkingPeerEvents() state = StoredState() SWIFT_KEY = "swift_key" SWIFT_USER_CREATED = "swift_user_created" def __init__(self, charm, relation_name): super().__init__(charm, relation_name) self.relation_name = relation_name self.this_unit = self.framework.model.unit self.framework.observe( charm.on[relation_name].relation_changed, self.on_changed) def on_changed(self, event): logging.info("CephBenchmarkingPeers on_changed") self.on.has_peers.emit() if self.ready_peer_details: self.on.ready_peers.emit() def set_swift_key(self, password): logging.info("Setting swift key") self.peers_rel.data[self.peers_rel.app][self.SWIFT_KEY] = password def set_swift_user_created(self, user): logging.info("Setting swift user created") self.peers_rel.data[self.peers_rel.app][self.SWIFT_USER_CREATED] = user @property def ready_peer_details(self): peers = { self.framework.model.unit.name: { 'ip': self.peers_bind_address}} for u in self.peers_rel.units: peers[u.name] = { 'ip': self.peers_rel.data[u]['ingress-address']} return peers @property def is_joined(self): return self.peers_rel is not None @property def peers_rel(self): return self.framework.model.get_relation(self.relation_name) @property def peers_binding(self): return self.framework.model.get_binding(self.peers_rel) @property def peers_bind_address(self): return str(self.peers_binding.network.bind_address) @property def swift_key(self): if not self.peers_rel: return None return self.peers_rel.data[self.peers_rel.app].get(self.SWIFT_KEY) @property def swift_user_created(self): if not self.peers_rel: return None return self.peers_rel.data[ 
self.peers_rel.app].get(self.SWIFT_USER_CREATED) @property def peer_addresses(self): addresses = [self.peers_bind_address] for u in self.peers_rel.units: addresses.append(self.peers_rel.data[u]['ingress-address']) return sorted(addresses) @property def peers_count(self): if self.peers_rel: return len(self.peers_rel.units) else: return 0 @property def unit_count(self): return self.peers_count + 1
0.71721
0.090133
from Jumpscale import j import binascii from io import BytesIO import json import os from JumpscaleLibs.servers.mail.smtp import app from JumpscaleLibs.servers.mail.imap.bcdbmailbox import BCDBMailboxdir class mail(j.baseclasses.threebot_actor): def _init(self, **kwargs): models = j.servers.imap.get_models() self.bcdb_mailbox = BCDBMailboxdir(models) def send(self, mail, schema_out=None, user_session=None): """ ```in mail = (O) !email.message.1 ``` ```out success = (B) ``` """ server = app.MailServer() mail_stored = server.store_mail(mail._ddict, is_send=True) self.bcdb_mailbox.create_folder(mail_stored.folder) out = schema_out.new() out.success = True return out def list(self, date_from=None, date_to=None, user_session=None): """ ```in date_from = (D) date_to = (D) ``` """ if date_from and date_to: date_from = j.data.types.date.clean(date_from) date_to = j.data.types.date.clean(date_to) query = "WHERE date BETWEEN {} and {}".format(date_from, date_to) mails = self.bcdb_mailbox.get_messages(query).fetchall() return json.dumps([self.bcdb_mailbox.get_object(o[0])._ddict for o in mails]) mails = self.bcdb_mailbox.get_messages() return json.dumps([o._ddict for o in mails]) def list_folders(self, user_session=None): """ """ folders = self.bcdb_mailbox.list_folders() return folders def create_folder(self, name, schema_out=None, user_session=None): """ ```in name = (S) ``` ```out success = (B) ``` """ self.bcdb_mailbox.create_folder(name) out = schema_out.new() out.success = True return out def update_folder_name(self, old_name, new_name, schema_out=None, user_session=None): """ ```in old_name = (S) new_name = (S) ``` ```out success = (B) ``` """ self.bcdb_mailbox.rename_folder(old_name, new_name) out = schema_out.new() out.success = True return out def move_message(self, mail_id, folder_name, schema_out=None, user_session=None): """ ```in mail_id = (I) folder_name = (S) ``` ```out success = (B) ``` """ model = self.bcdb_mailbox.get_object(mail_id) model.folder = 
folder_name model.save() out = schema_out.new() out.success = True return out def delete(self, mail_id, schema_out=None, user_session=None): """ ```in mail_id = (I) ``` ```out success = (B) ``` """ self.bcdb_mailbox.remove(mail_id) out = schema_out.new() out.success = True return out def update_priority(self, mail_id, priority, schema_out=None, user_session=None): """ ```in mail_id = (I) priority = (B) ``` ```out success = (B) ``` """ model = self.bcdb_mailbox.get_object(mail_id) model.priority = priority model.save() out = schema_out.new() out.success = True return out def receive(self, mail, schema_out=None, user_session=None): """ ```in mail = (O) !email.message.1 ``` ```out success = (B) ``` """ server = app.MailServer() mail_stored = server.store_mail(mail._ddict) self.bcdb_mailbox.create_folder(mail_stored.folder) out = schema_out.new() out.success = True return out
ThreeBotPackages/threebot/mail/actors/mail.py
from Jumpscale import j import binascii from io import BytesIO import json import os from JumpscaleLibs.servers.mail.smtp import app from JumpscaleLibs.servers.mail.imap.bcdbmailbox import BCDBMailboxdir class mail(j.baseclasses.threebot_actor): def _init(self, **kwargs): models = j.servers.imap.get_models() self.bcdb_mailbox = BCDBMailboxdir(models) def send(self, mail, schema_out=None, user_session=None): """ ```in mail = (O) !email.message.1 ``` ```out success = (B) ``` """ server = app.MailServer() mail_stored = server.store_mail(mail._ddict, is_send=True) self.bcdb_mailbox.create_folder(mail_stored.folder) out = schema_out.new() out.success = True return out def list(self, date_from=None, date_to=None, user_session=None): """ ```in date_from = (D) date_to = (D) ``` """ if date_from and date_to: date_from = j.data.types.date.clean(date_from) date_to = j.data.types.date.clean(date_to) query = "WHERE date BETWEEN {} and {}".format(date_from, date_to) mails = self.bcdb_mailbox.get_messages(query).fetchall() return json.dumps([self.bcdb_mailbox.get_object(o[0])._ddict for o in mails]) mails = self.bcdb_mailbox.get_messages() return json.dumps([o._ddict for o in mails]) def list_folders(self, user_session=None): """ """ folders = self.bcdb_mailbox.list_folders() return folders def create_folder(self, name, schema_out=None, user_session=None): """ ```in name = (S) ``` ```out success = (B) ``` """ self.bcdb_mailbox.create_folder(name) out = schema_out.new() out.success = True return out def update_folder_name(self, old_name, new_name, schema_out=None, user_session=None): """ ```in old_name = (S) new_name = (S) ``` ```out success = (B) ``` """ self.bcdb_mailbox.rename_folder(old_name, new_name) out = schema_out.new() out.success = True return out def move_message(self, mail_id, folder_name, schema_out=None, user_session=None): """ ```in mail_id = (I) folder_name = (S) ``` ```out success = (B) ``` """ model = self.bcdb_mailbox.get_object(mail_id) model.folder = 
folder_name model.save() out = schema_out.new() out.success = True return out def delete(self, mail_id, schema_out=None, user_session=None): """ ```in mail_id = (I) ``` ```out success = (B) ``` """ self.bcdb_mailbox.remove(mail_id) out = schema_out.new() out.success = True return out def update_priority(self, mail_id, priority, schema_out=None, user_session=None): """ ```in mail_id = (I) priority = (B) ``` ```out success = (B) ``` """ model = self.bcdb_mailbox.get_object(mail_id) model.priority = priority model.save() out = schema_out.new() out.success = True return out def receive(self, mail, schema_out=None, user_session=None): """ ```in mail = (O) !email.message.1 ``` ```out success = (B) ``` """ server = app.MailServer() mail_stored = server.store_mail(mail._ddict) self.bcdb_mailbox.create_folder(mail_stored.folder) out = schema_out.new() out.success = True return out
0.52756
0.540257
from errno import EEXIST import os from os.path import exists, expanduser, join import pandas as pd def hidden(path): """Check if a path is hidden. Parameters ---------- path : str A filepath. """ return os.path.split(path)[1].startswith('.') def ensure_directory(path): """ Ensure that a directory named "path" exists. """ try: os.makedirs(path) except OSError as exc: if exc.errno == EEXIST and os.path.isdir(path): return raise def ensure_directory_containing(path): """ Ensure that the directory containing `path` exists. This is just a convenience wrapper for doing:: ensure_directory(os.path.dirname(path)) """ ensure_directory(os.path.dirname(path)) def ensure_file(path): """ Ensure that a file exists. This will create any parent directories needed and create an empty file if it does not exists. Parameters ---------- path : str The file path to ensure exists. """ ensure_directory_containing(path) open(path, 'a+').close() # touch the file def last_modified_time(path): """ Get the last modified time of path as a Timestamp. """ return pd.Timestamp(os.path.getmtime(path), unit='s', tz='UTC') def modified_since(path, dt): """ Check whether `path` was modified since `dt`. Returns False if path doesn't exist. Parameters ---------- path : str Path to the file to be checked. dt : pd.Timestamp The date against which to compare last_modified_time(path). Returns ------- was_modified : bool Will be ``False`` if path doesn't exists, or if its last modified date is earlier than or equal to `dt` """ return exists(path) and last_modified_time(path) > dt def zipline_root(environ=None): """ Get the root directory for all zipline-managed files. For testing purposes, this accepts a dictionary to interpret as the os environment. Parameters ---------- environ : dict, optional A dict to interpret as the os environment. Returns ------- root : string Path to the zipline root dir. 
""" if environ is None: environ = os.environ root = environ.get('ZIPLINE_ROOT', None) if root is None: root = expanduser('~/.zipline') return root def zipline_path(paths, environ=None): """ Get a path relative to the zipline root. Parameters ---------- paths : list[str] List of requested path pieces. environ : dict, optional An environment dict to forward to zipline_root. Returns ------- newpath : str The requested path joined with the zipline root. """ return join(zipline_root(environ=environ), *paths) def default_extension(environ=None): """ Get the path to the default zipline extension file. Parameters ---------- environ : dict, optional An environment dict to forwart to zipline_root. Returns ------- default_extension_path : str The file path to the default zipline extension file. """ return zipline_path(['extension.py'], environ=environ) def data_root(environ=None): """ The root directory for zipline data files. Parameters ---------- environ : dict, optional An environment dict to forward to zipline_root. Returns ------- data_root : str The zipline data root. """ return zipline_path(['data'], environ=environ) def ensure_data_root(environ=None): """ Ensure that the data root exists. """ ensure_directory(data_root(environ=environ)) def data_path(paths, environ=None): """ Get a path relative to the zipline data directory. Parameters ---------- paths : iterable[str] List of requested path pieces. environ : dict, optional An environment dict to forward to zipline_root. Returns ------- newpath : str The requested path joined with the zipline data root. """ return zipline_path(['data'] + list(paths), environ=environ) def cache_root(environ=None): """ The root directory for zipline cache files. Parameters ---------- environ : dict, optional An environment dict to forward to zipline_root. Returns ------- cache_root : str The zipline cache root. """ return zipline_path(['cache'], environ=environ) def ensure_cache_root(environ=None): """ Ensure that the data root exists. 
""" ensure_directory(cache_root(environ=environ)) def cache_path(paths, environ=None): """ Get a path relative to the zipline cache directory. Parameters ---------- paths : iterable[str] List of requested path pieces. environ : dict, optional An environment dict to forward to zipline_root. Returns ------- newpath : str The requested path joined with the zipline cache root. """ return zipline_path(['cache'] + list(paths), environ=environ)
zipline/utils/paths.py
from errno import EEXIST import os from os.path import exists, expanduser, join import pandas as pd def hidden(path): """Check if a path is hidden. Parameters ---------- path : str A filepath. """ return os.path.split(path)[1].startswith('.') def ensure_directory(path): """ Ensure that a directory named "path" exists. """ try: os.makedirs(path) except OSError as exc: if exc.errno == EEXIST and os.path.isdir(path): return raise def ensure_directory_containing(path): """ Ensure that the directory containing `path` exists. This is just a convenience wrapper for doing:: ensure_directory(os.path.dirname(path)) """ ensure_directory(os.path.dirname(path)) def ensure_file(path): """ Ensure that a file exists. This will create any parent directories needed and create an empty file if it does not exists. Parameters ---------- path : str The file path to ensure exists. """ ensure_directory_containing(path) open(path, 'a+').close() # touch the file def last_modified_time(path): """ Get the last modified time of path as a Timestamp. """ return pd.Timestamp(os.path.getmtime(path), unit='s', tz='UTC') def modified_since(path, dt): """ Check whether `path` was modified since `dt`. Returns False if path doesn't exist. Parameters ---------- path : str Path to the file to be checked. dt : pd.Timestamp The date against which to compare last_modified_time(path). Returns ------- was_modified : bool Will be ``False`` if path doesn't exists, or if its last modified date is earlier than or equal to `dt` """ return exists(path) and last_modified_time(path) > dt def zipline_root(environ=None): """ Get the root directory for all zipline-managed files. For testing purposes, this accepts a dictionary to interpret as the os environment. Parameters ---------- environ : dict, optional A dict to interpret as the os environment. Returns ------- root : string Path to the zipline root dir. 
""" if environ is None: environ = os.environ root = environ.get('ZIPLINE_ROOT', None) if root is None: root = expanduser('~/.zipline') return root def zipline_path(paths, environ=None): """ Get a path relative to the zipline root. Parameters ---------- paths : list[str] List of requested path pieces. environ : dict, optional An environment dict to forward to zipline_root. Returns ------- newpath : str The requested path joined with the zipline root. """ return join(zipline_root(environ=environ), *paths) def default_extension(environ=None): """ Get the path to the default zipline extension file. Parameters ---------- environ : dict, optional An environment dict to forwart to zipline_root. Returns ------- default_extension_path : str The file path to the default zipline extension file. """ return zipline_path(['extension.py'], environ=environ) def data_root(environ=None): """ The root directory for zipline data files. Parameters ---------- environ : dict, optional An environment dict to forward to zipline_root. Returns ------- data_root : str The zipline data root. """ return zipline_path(['data'], environ=environ) def ensure_data_root(environ=None): """ Ensure that the data root exists. """ ensure_directory(data_root(environ=environ)) def data_path(paths, environ=None): """ Get a path relative to the zipline data directory. Parameters ---------- paths : iterable[str] List of requested path pieces. environ : dict, optional An environment dict to forward to zipline_root. Returns ------- newpath : str The requested path joined with the zipline data root. """ return zipline_path(['data'] + list(paths), environ=environ) def cache_root(environ=None): """ The root directory for zipline cache files. Parameters ---------- environ : dict, optional An environment dict to forward to zipline_root. Returns ------- cache_root : str The zipline cache root. """ return zipline_path(['cache'], environ=environ) def ensure_cache_root(environ=None): """ Ensure that the data root exists. 
""" ensure_directory(cache_root(environ=environ)) def cache_path(paths, environ=None): """ Get a path relative to the zipline cache directory. Parameters ---------- paths : iterable[str] List of requested path pieces. environ : dict, optional An environment dict to forward to zipline_root. Returns ------- newpath : str The requested path joined with the zipline cache root. """ return zipline_path(['cache'] + list(paths), environ=environ)
0.827515
0.444384